| code (string, 86-54.5k chars) | code_codestyle (int64, 0-371) | style_context (string, 87-49.2k chars) | style_context_codestyle (int64, 0-349) | label (int64, 0-1) |
|---|---|---|---|---|
def binomial_coefficient(n: int, r: int) -> int:
    # c holds the current row of Pascal's triangle, truncated at index r.
    c = [0 for i in range(r + 1)]
    # nC0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
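# A quick sanity check against the standard library (math.comb, Python 3.8+),
# added here for illustration; it is not part of the original snippet:
import math

assert binomial_coefficient(n=10, r=5) == math.comb(10, 5) == 252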
| 52 |
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request

from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version

from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging


COMMUNITY_PIPELINES_URL = (
    "https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"
)


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def get_diffusers_versions():
    url = "https://pypi.org/pypi/diffusers/json"
    releases = json.loads(request.urlopen(url).read())["releases"].keys()
    return sorted(releases, key=lambda x: version.Version(x))


def init_hf_modules():
    """
    Creates the cache directory for modules with an init, and adds it to the Python path.
    """
    # This function has already been executed if HF_MODULES_CACHE already is in the Python path.
    if HF_MODULES_CACHE in sys.path:
        return

    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / "__init__.py"
    if not init_path.exists():
        init_path.touch()


def create_dynamic_module(name: Union[str, os.PathLike]):
    """
    Creates a dynamic module in the cache directory for modules.
    """
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / "__init__.py"
    if not init_path.exists():
        init_path.touch()


def get_relative_imports(module_file):
    """
    Get the list of modules that are relatively imported in a module file.
    """
    with open(module_file, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import .xxx`
    relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))


def get_relative_import_files(module_file):
    """
    Get the list of all files that are needed for a given module, recursing through relative imports.
    """
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []

    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))

        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f"{f}.py" for f in new_import_files]

        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)

    return all_relative_imports


def check_imports(filename):
    """
    Check if the current Python environment contains all the libraries that are imported in a file.
    """
    with open(filename, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import xxx`
    imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")]

    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)

    if len(missing_packages) > 0:
        raise ImportError(
            "This modeling file requires the following packages that were not found in your environment: "
            f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`"
        )

    return get_relative_imports(filename)


def get_class_in_module(class_name, module_path):
    """
    Import a module from the cache directory for modules and extract a class from it.
    """
    module_path = module_path.replace(os.path.sep, ".")
    module = importlib.import_module(module_path)

    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)


def find_pipeline_class(loaded_module):
    """
    Retrieve the pipeline class that inherits from `DiffusionPipeline`. There has to be exactly one such class.
    """
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))

    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split(".")[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
                    f" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
                    f" {loaded_module}."
                )
            pipeline_class = cls

    return pipeline_class


def get_cached_module_file(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
):
    """
    Prepare the module file in a local folder or a distant repo and return its path inside the cached dynamic modules.
    """
    # Download and cache module_file from the repo `pretrained_model_name_or_path`, or grab it if it's a local file.
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)

    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)

    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = "local"
    elif pretrained_model_name_or_path.count("/") == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = "v" + ".".join(__version__.split(".")[:3])

        # retrieve the GitHub version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else "main"
            logger.info(f"Defaulting to latest_version: {revision}.")
        elif revision in available_versions:
            revision = f"v{revision}"
        elif revision == "main":
            revision = revision
        else:
            raise ValueError(
                f"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
                f" {', '.join(available_versions + ['main'])}."
            )

        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=False,
            )
            submodule = "git"
            module_file = pretrained_model_name_or_path + ".py"
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path,
                module_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
            )
            submodule = os.path.join("local", "--".join(pretrained_model_name_or_path.split("/")))
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise

    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)

    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = f"{module_needed}.py"
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None

        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha

        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)

        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative imports
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path,
                    f"{module_needed}.py",
                    cache_dir=cache_dir,
                    force_download=force_download,
                    resume_download=resume_download,
                    proxies=proxies,
                    use_auth_token=use_auth_token,
                    revision=revision,
                    local_files_only=local_files_only,
                )
    return os.path.join(full_submodule, module_file)


def get_class_from_dynamic_module(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    class_name: Optional[str] = None,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    """
    Extract a class from a module file, present in a local folder or in a repository of a model.
    """
    final_module = get_cached_module_file(
        pretrained_model_name_or_path,
        module_file,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    return get_class_in_module(class_name, final_module.replace(".py", ""))
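# A minimal usage sketch of the loader defined above. The import path and the
# community pipeline name are illustrative assumptions based on the diffusers
# repository layout this file belongs to, not a guaranteed stable API:
from diffusers.utils.dynamic_modules_utils import get_class_from_dynamic_module

# Fetches examples/community/clip_guided_stable_diffusion.py from the diffusers
# GitHub repo, caches it under HF_MODULES_CACHE, and returns the single
# DiffusionPipeline subclass defined inside it.
pipeline_class = get_class_from_dynamic_module(
    "clip_guided_stable_diffusion", module_file="clip_guided_stable_diffusion.py"
)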
| 75 | 0 |
def binary_exponentiation(a: int, n: int, mod: int) -> int:
    # Computes (a ** n) % mod with O(log n) multiplications.
    if n == 0:
        return 1

    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod

    else:
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod


# a prime number
p = 701

a = 1_000_000_000
b = 10

# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)

# using plain Python operators:
print((a / b) % p == (a * b ** (p - 2)) % p)
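# For comparison (added here for illustration): Python's built-in three-argument
# pow computes the same modular inverse without the explicit recursion:
assert binary_exponentiation(b, p - 2, p) == pow(b, p - 2, p)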
| 328 |
import torch
from torch import nn

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin


class UnCLIPTextProjModel(ModelMixin, ConfigMixin):
    """
    Utility class for CLIP embeddings. Used to combine the image and text embeddings into a format usable by the
    decoder.
    """

    @register_to_config
    def __init__(
        self,
        *,
        clip_extra_context_tokens: int = 4,
        clip_embeddings_dim: int = 768,
        time_embed_dim: int,
        cross_attention_dim,
    ):
        super().__init__()

        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))

        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)

        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim
        )
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)

    def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size, -1
            )
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)

        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]

        batch_size = prompt_embeds.shape[0]

        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds

        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens)
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)

        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)

        return text_encoder_hidden_states, additive_clip_time_embeddings
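# A self-contained sketch of the reshape/permute trick used above to turn one
# projected image embedding into n extra context tokens (dimensions illustrative):
import torch

batch, embed_dim, n_tokens, cross_dim = 2, 768, 4, 1280
proj = torch.nn.Linear(embed_dim, n_tokens * cross_dim)
tokens = proj(torch.randn(batch, embed_dim))  # (2, 4 * 1280)
tokens = tokens.reshape(batch, -1, n_tokens)  # (2, 1280, 4)
tokens = tokens.permute(0, 2, 1)              # (2, 4, 1280): n_tokens extra context tokens
print(tokens.shape)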
| 328 | 1 |
from dataclasses import dataclass
from typing import List, Optional, Union

import numpy as np
import PIL

from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
    fast27_timesteps,
    smart27_timesteps,
    smart50_timesteps,
    smart100_timesteps,
    smart185_timesteps,
    super27_timesteps,
    super40_timesteps,
    super100_timesteps,
)


@dataclass
class IFPipelineOutput(BaseOutput):
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_detected: Optional[List[bool]]
    watermark_detected: Optional[List[bool]]


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipeline_if import IFPipeline
    from .pipeline_if_img2img import IFImg2ImgPipeline
    from .pipeline_if_img2img_superresolution import IFImg2ImgSuperResolutionPipeline
    from .pipeline_if_inpainting import IFInpaintingPipeline
    from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
    from .pipeline_if_superresolution import IFSuperResolutionPipeline
    from .safety_checker import IFSafetyChecker
    from .watermark import IFWatermarker
| 34 |
import argparse
from typing import Dict

import tensorflow as tf
import torch
from tqdm import tqdm

from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration


INIT_COMMON = [
    # tf -> hf
    ("/", "."),
    ("layer_", "layers."),
    ("kernel", "weight"),
    ("beta", "bias"),
    ("gamma", "weight"),
    ("pegasus", "model"),
]
END_COMMON = [
    (".output.dense", ".fc2"),
    ("intermediate.LayerNorm", "final_layer_norm"),
    ("intermediate.dense", "fc1"),
]

DECODER_PATTERNS = (
    INIT_COMMON
    + [
        ("attention.self.LayerNorm", "self_attn_layer_norm"),
        ("attention.output.dense", "self_attn.out_proj"),
        ("attention.self", "self_attn"),
        ("attention.encdec.LayerNorm", "encoder_attn_layer_norm"),
        ("attention.encdec_output.dense", "encoder_attn.out_proj"),
        ("attention.encdec", "encoder_attn"),
        ("key", "k_proj"),
        ("value", "v_proj"),
        ("query", "q_proj"),
        ("decoder.LayerNorm", "decoder.layernorm_embedding"),
    ]
    + END_COMMON
)

REMAINING_PATTERNS = (
    INIT_COMMON
    + [
        ("embeddings.word_embeddings", "shared.weight"),
        ("embeddings.position_embeddings", "embed_positions.weight"),
        ("attention.self.LayerNorm", "self_attn_layer_norm"),
        ("attention.output.dense", "self_attn.output"),
        ("attention.self", "self_attn.self"),
        ("encoder.LayerNorm", "encoder.layernorm_embedding"),
    ]
    + END_COMMON
)

KEYS_TO_IGNORE = [
    "encdec/key/bias",
    "encdec/query/bias",
    "encdec/value/bias",
    "self/key/bias",
    "self/query/bias",
    "self/value/bias",
    "encdec_output/dense/bias",
    "attention/output/dense/bias",
]


def rename_state_dict_key(k, patterns):
    # Apply each (tf_name, hf_name) substitution in order to translate a TF key.
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k


def convert_bigbird_pegasus(tf_weights: dict, config_update: dict) -> BigBirdPegasusForConditionalGeneration:
    cfg = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(cfg)
    state_dict = torch_model.state_dict()
    mapping = {}

    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder")}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder")}

    for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(i in k for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(i in k for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
    mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight")
    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            "final_logits_bias",
            "model.encoder.embed_tokens.weight",
            "model.decoder.embed_tokens.weight",
            "lm_head.weight",
        ]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model


def get_tf_weights_as_numpy(path) -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights


def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str, config_update: dict):
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("--save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    config_update = {}
    convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
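# For illustration, tracing one TF checkpoint key through the decoder patterns
# defined above (each substitution is applied in order):
key = "pegasus/decoder/layer_0/attention/self/query/kernel"
print(rename_state_dict_key(key, DECODER_PATTERNS))
# -> model.decoder.layers.0.self_attn.q_proj.weight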
| 330 | 0 |
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100_000)]


def next_number(number: int) -> int:
    """Return the sum of the squared digits of `number`."""
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100_000]
        number //= 100_000

    return sum_of_digits_squared


# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS: list[bool | None] = [None] * 10_000_000
CHAINS[0] = True  # the chain starting at 1 ends with 1
CHAINS[57] = False  # the chain starting at 58 ends with 89


def chain(number: int) -> bool:
    """Follow the chain of squared-digit sums until it reaches 1 or 89,
    memoizing the result (True for 1, False for 89) along the way."""
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    while number < 10_000_000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain


def solution(number: int = 10_000_000) -> int:
    """Count how many starting numbers below `number` arrive at 89."""
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)

    return CHAINS[:number].count(False)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{solution() = }")
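# A quick check of next_number against the chains quoted in the Project Euler 92
# problem statement: 44 -> 32 -> 13 -> 10 -> 1, and 85 -> 89.
n = 44
while n not in (1, 89):
    n = next_number(n)
print(n)  # 1
print(next_number(85))  # 89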
| 160 |
import hashlib
import unittest
from typing import Dict

import numpy as np

from transformers import (
    MODEL_FOR_MASK_GENERATION_MAPPING,
    TF_MODEL_FOR_MASK_GENERATION_MAPPING,
    is_vision_available,
    pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_torch,
    require_vision,
    slow,
)


if is_vision_available():
    from PIL import Image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image: Image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]


def mask_to_test_readable(mask: Image) -> Dict:
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(mask), "shape": shape}


@is_pipeline_test
@require_vision
@require_torch
class MaskGenerationPipelineTests(unittest.TestCase):
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )

    def get_test_pipeline(self, model, tokenizer, processor):
        image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor)
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, mask_generator, examples):
        pass

    @require_tf
    @unittest.skip("Image segmentation not implemented in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_small_model_pt(self):
        image_segmenter = pipeline("mask-generation", model="facebook/sam-vit-huge")

        outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256)

        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]

        # fmt: off
        self.assertEqual(
            nested_simplify(new_output, decimals=4),
            [
                {"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.0444},
                {"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.021},
                {"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.0167},
                {"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.0132},
                {"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.0053},
                {"mask": {"hash": "e2d0b7a0b7", "shape": (480, 640)}, "scores": 0.9967},
                {"mask": {"hash": "453c7844bd", "shape": (480, 640)}, "scores": 0.993},
                {"mask": {"hash": "3d44f2926d", "shape": (480, 640)}, "scores": 0.9909},
                {"mask": {"hash": "64033ddc3f", "shape": (480, 640)}, "scores": 0.9879},
                {"mask": {"hash": "801064ff79", "shape": (480, 640)}, "scores": 0.9834},
                {"mask": {"hash": "6172f276ef", "shape": (480, 640)}, "scores": 0.9716},
                {"mask": {"hash": "b49e60e084", "shape": (480, 640)}, "scores": 0.9612},
                {"mask": {"hash": "a811e775fd", "shape": (480, 640)}, "scores": 0.9599},
                {"mask": {"hash": "a6a8ebcf4b", "shape": (480, 640)}, "scores": 0.9552},
                {"mask": {"hash": "9d8257e080", "shape": (480, 640)}, "scores": 0.9532},
                {"mask": {"hash": "32de6454a8", "shape": (480, 640)}, "scores": 0.9516},
                {"mask": {"hash": "af3d4af2c8", "shape": (480, 640)}, "scores": 0.9499},
                {"mask": {"hash": "3c6db475fb", "shape": (480, 640)}, "scores": 0.9483},
                {"mask": {"hash": "c290813fb9", "shape": (480, 640)}, "scores": 0.9464},
                {"mask": {"hash": "b6f0b8f606", "shape": (480, 640)}, "scores": 0.943},
                {"mask": {"hash": "92ce16bfdf", "shape": (480, 640)}, "scores": 0.943},
                {"mask": {"hash": "c749b25868", "shape": (480, 640)}, "scores": 0.9408},
                {"mask": {"hash": "efb6cab859", "shape": (480, 640)}, "scores": 0.9335},
                {"mask": {"hash": "1ff2eafb30", "shape": (480, 640)}, "scores": 0.9326},
                {"mask": {"hash": "788b798e24", "shape": (480, 640)}, "scores": 0.9262},
                {"mask": {"hash": "abea804f0e", "shape": (480, 640)}, "scores": 0.8999},
                {"mask": {"hash": "7b9e8ddb73", "shape": (480, 640)}, "scores": 0.8986},
                {"mask": {"hash": "cd24047c8a", "shape": (480, 640)}, "scores": 0.8984},
                {"mask": {"hash": "6943e6bcbd", "shape": (480, 640)}, "scores": 0.8873},
                {"mask": {"hash": "b5f47c9191", "shape": (480, 640)}, "scores": 0.8871},
            ],
        )
        # fmt: on

    @require_torch
    @slow
    def test_threshold(self):
        model_id = "facebook/sam-vit-huge"
        image_segmenter = pipeline("mask-generation", model=model_id)

        outputs = image_segmenter(
            "http://images.cocodataset.org/val2017/000000039769.jpg", pred_iou_thresh=1, points_per_batch=256
        )

        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]

        self.assertEqual(
            nested_simplify(new_output, decimals=4),
            [
                {"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.0444},
                {"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.0210},
                {"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.0167},
                {"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.0132},
                {"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.0053},
            ],
        )
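# A small illustration of the hashing helper above on a tiny solid-color image
# (requires Pillow; the image content is illustrative):
from PIL import Image as PILImage

img = PILImage.new("RGB", (2, 2), color=(255, 0, 0))
print(hashimage(img))  # first 10 hex characters of the MD5 of the raw pixel bytes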
| 160 | 1 |
import math


class SelfOrganizingMap:
    def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
        """Pick an index from the squared Euclidean distances of the sample to both weight vectors."""
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow((sample[i] - weights[0][i]), 2)
            d1 += math.pow((sample[i] - weights[1][i]), 2)
        return 0 if d0 > d1 else 1

    def update(self, weights: list[list[float]], sample: list[int], j: int, alpha: float) -> list[list[float]]:
        """Move the j-th weight vector towards the sample by learning rate alpha."""
        for i in range(len(sample)):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights


def main() -> None:
    # Training Examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]

    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]

    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5

    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]

            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)

            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)

    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)

    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")


# running the main() function
if __name__ == "__main__":
    main()
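# A worked example for get_winner with the initial weights used above: for
# sample [1, 1, 0, 0] the squared distances are d0 = 1.86 and d1 = 0.98, so
# d0 > d1 and index 0 is returned.
som = SelfOrganizingMap()
w = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
print(som.get_winner(w, [1, 1, 0, 0]))  # 0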
| 27 |
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch

import numpy as np

from datasets import Dataset

from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer


class RealmRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.num_block_records = 5

        # Realm tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "test",
            "question",
            "this",
            "is",
            "the",
            "first",
            "second",
            "third",
            "fourth",
            "fifth",
            "record",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        realm_tokenizer_path = os.path.join(self.tmpdirname, "realm_tokenizer")
        os.makedirs(realm_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(realm_tokenizer_path, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        realm_block_records_path = os.path.join(self.tmpdirname, "realm_block_records")
        os.makedirs(realm_block_records_path, exist_ok=True)

    def get_tokenizer(self) -> RealmTokenizer:
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname, "realm_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def get_config(self):
        config = RealmConfig(num_block_records=self.num_block_records)
        return config

    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "question": ["foo", "bar"],
                "answers": [["Foo", "Bar"], ["Bar"]],
            }
        )
        return dataset

    def get_dummy_block_records(self):
        block_records = np.array(
            [
                b"This is the first record",
                b"This is the second record",
                b"This is the third record",
                b"This is the fourth record",
                b"This is the fifth record",
                b"This is a longer longer longer record",
            ],
            dtype=object,
        )
        return block_records

    def get_dummy_retriever(self):
        retriever = RealmRetriever(
            block_records=self.get_dummy_block_records(),
            tokenizer=self.get_tokenizer(),
        )
        return retriever

    def test_retrieve(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )

        self.assertEqual(len(has_answers), 2)
        self.assertEqual(len(start_pos), 2)
        self.assertEqual(len(end_pos), 2)
        self.assertEqual(concat_inputs.input_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.attention_mask.shape, (2, 10))
        self.assertEqual(concat_inputs.token_type_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.special_tokens_mask.shape, (2, 10))
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"],
        )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"],
        )

    def test_block_has_answer(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3, 5], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth", "longer longer"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )

        self.assertEqual([False, True, True], has_answers)
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]], start_pos)
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]], end_pos)

    def test_save_load_pretrained(self):
        retriever = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))

        # Test local path
        retriever = retriever.from_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))
        self.assertEqual(retriever.block_records[0], b"This is the first record")

        # Test mocked remote path
        with patch("transformers.models.realm.retrieval_realm.hf_hub_download") as mock_hf_hub_download:
            mock_hf_hub_download.return_value = os.path.join(
                os.path.join(self.tmpdirname, "realm_block_records"), _REALM_BLOCK_RECORDS_FILENAME
            )
            retriever = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa")

        self.assertEqual(retriever.block_records[0], b"This is the first record")
| 1 | 0 |
"""simple docstring"""
# Imports
import numpy as np
class A_ :
def __init__( self: List[str] ,__lowerCAmelCase: Union[str, Any]=None ,__lowerCAmelCase: Dict=None ,__lowerCAmelCase: int=None ,__lowerCAmelCase: Any=None ,__lowerCAmelCase: Dict=None ):
'''simple docstring'''
self.set_matricies(red=__lowerCAmelCase ,green=__lowerCAmelCase ,blue=__lowerCAmelCase ,red_edge=__lowerCAmelCase ,nir=__lowerCAmelCase )
def _lowercase ( self: List[str] ,__lowerCAmelCase: Dict=None ,__lowerCAmelCase: Optional[int]=None ,__lowerCAmelCase: Optional[int]=None ,__lowerCAmelCase: Union[str, Any]=None ,__lowerCAmelCase: Optional[int]=None ):
'''simple docstring'''
if red is not None:
_lowerCamelCase : List[Any] = red
if green is not None:
_lowerCamelCase : str = green
if blue is not None:
_lowerCamelCase : Dict = blue
if red_edge is not None:
_lowerCamelCase : Union[str, Any] = red_edge
if nir is not None:
_lowerCamelCase : str = nir
return True
def _lowercase ( self: List[str] ,__lowerCAmelCase: int="" ,__lowerCAmelCase: Dict=None ,__lowerCAmelCase: Dict=None ,__lowerCAmelCase: Any=None ,__lowerCAmelCase: Optional[int]=None ,__lowerCAmelCase: List[str]=None ):
'''simple docstring'''
self.set_matricies(red=__lowerCAmelCase ,green=__lowerCAmelCase ,blue=__lowerCAmelCase ,red_edge=__lowerCAmelCase ,nir=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = {
"ARVI2": self.arvaa,
"CCCI": self.ccci,
"CVI": self.cvi,
"GLI": self.gli,
"NDVI": self.ndvi,
"BNDVI": self.bndvi,
"redEdgeNDVI": self.red_edge_ndvi,
"GNDVI": self.gndvi,
"GBNDVI": self.gbndvi,
"GRNDVI": self.grndvi,
"RBNDVI": self.rbndvi,
"PNDVI": self.pndvi,
"ATSAVI": self.atsavi,
"BWDRVI": self.bwdrvi,
"CIgreen": self.ci_green,
"CIrededge": self.ci_rededge,
"CI": self.ci,
"CTVI": self.ctvi,
"GDVI": self.gdvi,
"EVI": self.evi,
"GEMI": self.gemi,
"GOSAVI": self.gosavi,
"GSAVI": self.gsavi,
"Hue": self.hue,
"IVI": self.ivi,
"IPVI": self.ipvi,
"I": self.i,
"RVI": self.rvi,
"MRVI": self.mrvi,
"MSAVI": self.m_savi,
"NormG": self.norm_g,
"NormNIR": self.norm_nir,
"NormR": self.norm_r,
"NGRDI": self.ngrdi,
"RI": self.ri,
"S": self.s,
"IF": self._if,
"DVI": self.dvi,
"TVI": self.tvi,
"NDRE": self.ndre,
}
try:
return funcs[index]()
except KeyError:
print("Index not in the list!" )
return False
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))
def _lowercase ( self: List[str] ):
'''simple docstring'''
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
def _lowercase ( self: int ):
'''simple docstring'''
return self.nir * (self.red / (self.green**2))
def _lowercase ( self: List[str] ):
'''simple docstring'''
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
return (self.nir - self.red) / (self.nir + self.red)
def _lowercase ( self: int ):
'''simple docstring'''
return (self.nir - self.blue) / (self.nir + self.blue)
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
return (self.redEdge - self.red) / (self.redEdge + self.red)
def _lowercase ( self: Dict ):
'''simple docstring'''
return (self.nir - self.green) / (self.nir + self.green)
def _lowercase ( self: List[str] ):
'''simple docstring'''
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
def _lowercase ( self: Tuple ):
'''simple docstring'''
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
def _lowercase ( self: Dict ):
'''simple docstring'''
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
def _lowercase ( self: str ):
'''simple docstring'''
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
def _lowercase ( self: List[Any] ,__lowerCAmelCase: int=0.08 ,__lowerCAmelCase: Tuple=1.22 ,__lowerCAmelCase: str=0.03 ):
'''simple docstring'''
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
def _lowercase ( self: Dict ):
'''simple docstring'''
return (self.nir / self.green) - 1
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
return (self.nir / self.redEdge) - 1
def _lowercase ( self: Tuple ):
'''simple docstring'''
return (self.red - self.blue) / self.red
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase : Any = self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2))
def _lowercase ( self: List[str] ):
'''simple docstring'''
return self.nir - self.green
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase : List[str] = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
return n * (1 - 0.25 * n) - (self.red - 0.1_25) / (1 - self.red)
def _lowercase ( self: Tuple ,__lowerCAmelCase: List[str]=0.16 ):
'''simple docstring'''
return (self.nir - self.green) / (self.nir + self.green + y)
def _lowercase ( self: List[str] ,__lowerCAmelCase: List[Any]=0.5 ):
'''simple docstring'''
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
return np.arctan(
((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue) )
def _lowercase ( self: Optional[int] ,__lowerCAmelCase: Optional[Any]=None ,__lowerCAmelCase: Union[str, Any]=None ):
'''simple docstring'''
return (self.nir - b) / (a * self.red)
def _lowercase ( self: Dict ):
'''simple docstring'''
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
def _lowercase ( self: Any ):
'''simple docstring'''
return (self.red + self.green + self.blue) / 30.5
def _lowercase ( self: List[Any] ):
'''simple docstring'''
return self.nir / self.red
def _lowercase ( self: Any ):
'''simple docstring'''
return (self.rvi() - 1) / (self.rvi() + 1)
def _lowercase ( self: Dict ):
'''simple docstring'''
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
def _lowercase ( self: int ):
'''simple docstring'''
return self.green / (self.nir + self.red + self.green)
def _lowercase ( self: int ):
'''simple docstring'''
return self.nir / (self.nir + self.red + self.green)
def _lowercase ( self: str ):
'''simple docstring'''
return self.red / (self.nir + self.red + self.green)
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
return (self.green - self.red) / (self.green + self.red)
def _lowercase ( self: str ):
'''simple docstring'''
return (self.red - self.green) / (self.red + self.green)
def _lowercase ( self: int ):
'''simple docstring'''
_lowerCamelCase : str = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] )
_lowerCamelCase : str = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] )
return (max_value - min_value) / max_value
def _lowercase ( self: Any ):
'''simple docstring'''
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
return self.nir / self.red
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
return (self.ndvi() + 0.5) ** (1 / 2)
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
return (self.nir - self.redEdge) / (self.nir + self.redEdge) | 340 |
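# A short illustration of the class above: NDVI on small synthetic bands
# (band values are illustrative):
import numpy as np

red = np.array([[0.1, 0.2], [0.3, 0.4]])
nir = np.array([[0.5, 0.6], [0.7, 0.8]])
cl = IndexCalculation(red=red, nir=nir)
print(cl.calculation("NDVI"))  # elementwise (nir - red) / (nir + red)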
"""simple docstring"""
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def lowerCamelCase_( _lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
_lowerCamelCase : Optional[Any] = np.inf
def set_batch_size(_lowerCamelCase ) -> None:
nonlocal batch_size
if isinstance(_lowerCamelCase , _lowerCamelCase ):
_lowerCamelCase : Optional[int] = min(_lowerCamelCase , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS )
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
_lowerCamelCase : Union[str, Any] = min(_lowerCamelCase , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS )
elif isinstance(_lowerCamelCase , _lowerCamelCase ) and feature.dtype == "binary":
_lowerCamelCase : List[str] = min(_lowerCamelCase , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS )
_visit(_lowerCamelCase , _lowerCamelCase )
return None if batch_size is np.inf else batch_size
class A_ ( _a ):
def __init__( self: Optional[int] ,__lowerCAmelCase: NestedDataStructureLike[PathLike] ,__lowerCAmelCase: Optional[NamedSplit] = None ,__lowerCAmelCase: Optional[Features] = None ,__lowerCAmelCase: str = None ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: Optional[int] = None ,**__lowerCAmelCase: int ,):
'''simple docstring'''
super().__init__(
__lowerCAmelCase ,split=__lowerCAmelCase ,features=__lowerCAmelCase ,cache_dir=__lowerCAmelCase ,keep_in_memory=__lowerCAmelCase ,streaming=__lowerCAmelCase ,num_proc=__lowerCAmelCase ,**__lowerCAmelCase ,)
_lowerCamelCase : Tuple = path_or_paths if isinstance(__lowerCAmelCase ,__lowerCAmelCase ) else {self.split: path_or_paths}
_lowerCamelCase : Any = _PACKAGED_DATASETS_MODULES["parquet"][1]
_lowerCamelCase : int = Parquet(
cache_dir=__lowerCAmelCase ,data_files=__lowerCAmelCase ,features=__lowerCAmelCase ,hash=__lowerCAmelCase ,**__lowerCAmelCase ,)
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
if self.streaming:
_lowerCamelCase : List[Any] = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
_lowerCamelCase : Tuple = None
_lowerCamelCase : Optional[int] = None
_lowerCamelCase : List[str] = None
_lowerCamelCase : str = None
self.builder.download_and_prepare(
download_config=__lowerCAmelCase ,download_mode=__lowerCAmelCase ,verification_mode=__lowerCAmelCase ,base_path=__lowerCAmelCase ,num_proc=self.num_proc ,)
_lowerCamelCase : Any = self.builder.as_dataset(
split=self.split ,verification_mode=__lowerCAmelCase ,in_memory=self.keep_in_memory )
return dataset
class A_ :
def __init__( self: str ,__lowerCAmelCase: Dataset ,__lowerCAmelCase: Union[PathLike, BinaryIO] ,__lowerCAmelCase: Optional[int] = None ,**__lowerCAmelCase: List[Any] ,):
'''simple docstring'''
_lowerCamelCase : Any = dataset
_lowerCamelCase : Any = path_or_buf
_lowerCamelCase : Any = batch_size or get_writer_batch_size(dataset.features )
_lowerCamelCase : List[str] = parquet_writer_kwargs
def _lowercase ( self: Tuple ):
'''simple docstring'''
_lowerCamelCase : Tuple = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
if isinstance(self.path_or_buf ,(str, bytes, os.PathLike) ):
with open(self.path_or_buf ,"wb+" ) as buffer:
_lowerCamelCase : str = self._write(file_obj=__lowerCAmelCase ,batch_size=__lowerCAmelCase ,**self.parquet_writer_kwargs )
else:
_lowerCamelCase : Optional[int] = self._write(file_obj=self.path_or_buf ,batch_size=__lowerCAmelCase ,**self.parquet_writer_kwargs )
return written
def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: BinaryIO ,__lowerCAmelCase: int ,**__lowerCAmelCase: Optional[int] ):
'''simple docstring'''
_lowerCamelCase : List[str] = 0
_lowerCamelCase : Optional[int] = parquet_writer_kwargs.pop("path_or_buf" ,__lowerCAmelCase )
_lowerCamelCase : List[str] = self.dataset.features.arrow_schema
_lowerCamelCase : str = pq.ParquetWriter(__lowerCAmelCase ,schema=__lowerCAmelCase ,**__lowerCAmelCase )
for offset in logging.tqdm(
range(0 ,len(self.dataset ) ,__lowerCAmelCase ) ,unit="ba" ,disable=not logging.is_progress_bar_enabled() ,desc="Creating parquet from Arrow format" ,):
_lowerCamelCase : List[str] = query_table(
table=self.dataset._data ,key=slice(__lowerCAmelCase ,offset + batch_size ) ,indices=self.dataset._indices if self.dataset._indices is not None else None ,)
writer.write_table(__lowerCAmelCase )
written += batch.nbytes
writer.close()
return written | 340 | 1 |
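# A minimal round trip through the public `datasets` wrappers that are backed by
# the reader/writer above (assumes the datasets package is installed):
from datasets import Dataset

ds = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
ds.to_parquet("demo.parquet")                    # backed by ParquetDatasetWriter
reloaded = Dataset.from_parquet("demo.parquet")  # backed by ParquetDatasetReader
assert reloaded["text"] == ["a", "b"]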
"""simple docstring"""
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class lowercase__ ( snake_case__, snake_case__, unittest.TestCase ):
_UpperCAmelCase :List[Any] = AutoencoderKL
_UpperCAmelCase :str = '''sample'''
_UpperCAmelCase :int = 1e-2
@property
def UpperCAmelCase__ ( self : Any ):
lowerCamelCase_ : Optional[Any] =4
lowerCamelCase_ : Optional[int] =3
lowerCamelCase_ : Optional[Any] =(32, 32)
lowerCamelCase_ : Union[str, Any] =floats_tensor((batch_size, num_channels) + sizes ).to(SCREAMING_SNAKE_CASE__ )
return {"sample": image}
@property
def UpperCAmelCase__ ( self : Any ):
return (3, 32, 32)
@property
def UpperCAmelCase__ ( self : Dict ):
return (3, 32, 32)
def UpperCAmelCase__ ( self : List[Any] ):
lowerCamelCase_ : Any ={
'''block_out_channels''': [32, 64],
'''in_channels''': 3,
'''out_channels''': 3,
'''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
'''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
'''latent_channels''': 4,
}
lowerCamelCase_ : Optional[int] =self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Any ):
pass
def UpperCAmelCase__ ( self : List[Any] ):
pass
@unittest.skipIf(torch_device == "mps" , "Gradient checkpointing skipped on MPS" )
def UpperCAmelCase__ ( self : str ):
lowerCamelCase_ : Dict =self.prepare_init_args_and_inputs_for_common()
lowerCamelCase_ : Optional[Any] =self.model_class(**SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
assert not model.is_gradient_checkpointing and model.training
lowerCamelCase_ : Union[str, Any] =model(**SCREAMING_SNAKE_CASE__ ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model.zero_grad()
lowerCamelCase_ : Union[str, Any] =torch.randn_like(SCREAMING_SNAKE_CASE__ )
lowerCamelCase_ : Optional[Any] =(out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
lowerCamelCase_ : Optional[Any] =self.model_class(**SCREAMING_SNAKE_CASE__ )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(SCREAMING_SNAKE_CASE__ )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
lowerCamelCase_ : int =model_a(**SCREAMING_SNAKE_CASE__ ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model_a.zero_grad()
lowerCamelCase_ : Union[str, Any] =(out_a - labels).mean()
loss_a.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_a).abs() < 1E-5 )
lowerCamelCase_ : Tuple =dict(model.named_parameters() )
lowerCamelCase_ : Any =dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) )
def UpperCAmelCase__ ( self : int ):
lowerCamelCase_ : List[Any] =AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" , output_loading_info=SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
model.to(SCREAMING_SNAKE_CASE__ )
lowerCamelCase_ : List[str] =model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def UpperCAmelCase__ ( self : List[str] ):
lowerCamelCase_ : Optional[int] =AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" )
lowerCamelCase_ : int =model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
if torch_device == "mps":
lowerCamelCase_ : Optional[int] =torch.manual_seed(0 )
else:
lowerCamelCase_ : int =torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(0 )
lowerCamelCase_ : Dict =torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
lowerCamelCase_ : Tuple =image.to(SCREAMING_SNAKE_CASE__ )
with torch.no_grad():
lowerCamelCase_ : int =model(SCREAMING_SNAKE_CASE__ , sample_posterior=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ ).sample
lowerCamelCase_ : Union[str, Any] =output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
lowerCamelCase_ : Dict =torch.tensor(
[
-4.0078E-01,
-3.8323E-04,
-1.2681E-01,
-1.1462E-01,
2.0095E-01,
1.0893E-01,
-8.8247E-02,
-3.0361E-01,
-9.8644E-03,
] )
elif torch_device == "cpu":
lowerCamelCase_ : Optional[Any] =torch.tensor(
[-0.1_352, 0.0_878, 0.0_419, -0.0_818, -0.1_069, 0.0_688, -0.1_458, -0.4_446, -0.0_026] )
else:
lowerCamelCase_ : int =torch.tensor(
[-0.2_421, 0.4_642, 0.2_507, -0.0_438, 0.0_682, 0.3_160, -0.2_018, -0.0_727, 0.2_485] )
self.assertTrue(torch_all_close(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , rtol=1E-2 ) )
@slow
class lowercase__ ( unittest.TestCase ):
def UpperCAmelCase__ ( self : List[str] , snake_case__ : str , snake_case__ : Union[str, Any] ):
return F"""gaussian_noise_s={seed}_shape={'_'.join([str(SCREAMING_SNAKE_CASE__ ) for s in shape] )}.npy"""
def UpperCAmelCase__ ( self : str ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self : Tuple , snake_case__ : str=0 , snake_case__ : Tuple=(4, 3, 512, 512) , snake_case__ : Optional[Any]=False ):
lowerCamelCase_ : str =torch.floataa if fpaa else torch.floataa
lowerCamelCase_ : Tuple =torch.from_numpy(load_hf_numpy(self.get_file_format(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) ).to(SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ )
return image
def UpperCAmelCase__ ( self : Dict , snake_case__ : List[str]="CompVis/stable-diffusion-v1-4" , snake_case__ : Tuple=False ):
        revision = "fp16" if fp16 else None
        torch_dtype = torch.float16 if fp16 else torch.float32

        model = AutoencoderKL.from_pretrained(
            model_id,
            subfolder="vae",
            torch_dtype=torch_dtype,
            revision=revision,
        )
        model.to(torch_device).eval()

        return model

    def get_generator(self, seed=0):
        if torch_device == "mps":
            return torch.manual_seed(seed)
        return torch.Generator(device=torch_device).manual_seed(seed)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ]
    )
    def test_stable_diffusion(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
            [47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        image = self.get_sd_image(seed, fp16=True)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ]
    )
    def test_stable_diffusion_mode(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)

        with torch.no_grad():
            sample = model(image).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)

    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
            [37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_decode(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)

    @parameterized.expand(
        [
            # fmt: off
            [27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
            [16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_decode_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=5e-3)

    @parameterized.expand([(13,), (16,), (27,)])
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available(), reason="xformers is not required when using PyTorch 2.0.")
    def test_stable_diffusion_decode_xformers_vs_2_0_fp16(self, seed):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_2, atol=1e-1)

    @parameterized.expand([(13,), (16,), (37,)])
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available(), reason="xformers is not required when using PyTorch 2.0.")
    def test_stable_diffusion_decode_xformers_vs_2_0(self, seed):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_2, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
            [47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
            # fmt: on
        ]
    )
    def test_stable_diffusion_encode_sample(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            dist = model.encode(image).latent_dist
            sample = dist.sample(generator=generator)

        assert list(sample.shape) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]

        output_slice = sample[0, -1, -3:, -3:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        tolerance = 3e-3 if torch_device != "mps" else 1e-2
        assert torch_all_close(output_slice, expected_output_slice, atol=tolerance)
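The assertions above go through a torch_all_close helper defined elsewhere in the test utilities. A minimal sketch of what such a helper plausibly does is shown below; it is an assumption for illustration, not the exact upstream definition:

def torch_all_close_sketch(a, b, *args, **kwargs):
    # Assumed shape of the helper: wrap torch.allclose and report the largest
    # absolute difference on failure so slice mismatches are easy to debug.
    if not torch.allclose(a, b, *args, **kwargs):
        raise AssertionError(f"Max diff is absolute {(a - b).abs().max()}.")
    return True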
| 144 |
"""simple docstring"""
def __lowerCamelCase ( a_ : list ) -> list:
if len(a_ ) < 2:
return collection
def circle_sort_util(a_ : list , a_ : int , a_ : int ) -> bool:
__SCREAMING_SNAKE_CASE :int = False
if low == high:
return swapped
__SCREAMING_SNAKE_CASE :int = low
__SCREAMING_SNAKE_CASE :Tuple = high
while left < right:
if collection[left] > collection[right]:
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :str = (
collection[right],
collection[left],
)
__SCREAMING_SNAKE_CASE :Optional[int] = True
left += 1
right -= 1
if left == right and collection[left] > collection[right + 1]:
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :Tuple = (
collection[right + 1],
collection[left],
)
__SCREAMING_SNAKE_CASE :Tuple = True
__SCREAMING_SNAKE_CASE :int = low + int((high - low) / 2 )
__SCREAMING_SNAKE_CASE :int = circle_sort_util(a_ , a_ , a_ )
__SCREAMING_SNAKE_CASE :Union[str, Any] = circle_sort_util(a_ , mid + 1 , a_ )
return swapped or left_swap or right_swap
__SCREAMING_SNAKE_CASE :Optional[Any] = True
while is_not_sorted is True:
__SCREAMING_SNAKE_CASE :Tuple = circle_sort_util(a_ , 0 , len(a_ ) - 1 )
return collection
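As a quick sanity check, circle_sort can be compared against Python's built-in sorted on random inputs. The helper below is a sketch added for illustration, not part of the original file:

def _check_circle_sort(trials: int = 100) -> None:
    # Property check: circle_sort must agree with sorted() on random lists.
    import random

    for _ in range(trials):
        data = [random.randint(-100, 100) for _ in range(random.randint(0, 20))]
        assert circle_sort(list(data)) == sorted(data)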
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(circle_sort(unsorted))
| 191 | 0 |
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional

from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand


try:
    from fastapi import Body, FastAPI, HTTPException
    from fastapi.routing import APIRoute
    from pydantic import BaseModel
    from starlette.responses import JSONResponse
    from uvicorn import run

    _serve_dependencies_installed = True
except (ImportError, AttributeError):
    BaseModel = object

    def Body(*x, **y):
        pass

    _serve_dependencies_installed = False


logger = logging.get_logger("transformers-cli/serving")


def serve_command_factory(args: Namespace):
    """Factory function used to instantiate the serving server from the provided command line arguments."""
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    return ServeCommand(nlp, args.host, args.port, args.workers)


class ServeModelInfoResult(BaseModel):
    """Expose model information."""

    infos: dict


class ServeTokenizeResult(BaseModel):
    """Tokenize result model."""

    tokens: List[str]
    tokens_ids: Optional[List[int]]


class ServeDeTokenizeResult(BaseModel):
    """DeTokenize result model."""

    text: str


class ServeForwardResult(BaseModel):
    """Forward result model."""

    output: Any


class ServeCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Register this command to argparse so it's available for the transformers CLI."""
        serve_parser = parser.add_parser(
            "serve", help="CLI tool to run inference requests through REST and GraphQL endpoints."
        )
        serve_parser.add_argument(
            "--task",
            type=str,
            choices=get_supported_tasks(),
            help="The task to run the pipeline on",
        )
        serve_parser.add_argument("--host", type=str, default="localhost", help="Interface the server will listen on.")
        serve_parser.add_argument("--port", type=int, default=8888, help="Port the serving will listen to.")
        serve_parser.add_argument("--workers", type=int, default=1, help="Number of http workers")
        serve_parser.add_argument("--model", type=str, help="Model's name or path to stored model.")
        serve_parser.add_argument("--config", type=str, help="Model's config name or path to stored model.")
        serve_parser.add_argument("--tokenizer", type=str, help="Tokenizer name to use.")
        serve_parser.add_argument(
            "--device",
            type=int,
            default=-1,
            help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
        )
        serve_parser.set_defaults(func=serve_command_factory)

    def __init__(self, pipeline: Pipeline, host: str, port: int, workers: int):
        self._pipeline = pipeline

        self.host = host
        self.port = port
        self.workers = workers

        if not _serve_dependencies_installed:
            raise RuntimeError(
                "Using serve command requires FastAPI and uvicorn. "
                'Please install transformers with [serving]: pip install "transformers[serving]". '
                "Or install FastAPI and uvicorn separately."
            )
        else:
            logger.info(f"Serving model over {host}:{port}")
            self._app = FastAPI(
                routes=[
                    APIRoute(
                        "/",
                        self.model_info,
                        response_model=ServeModelInfoResult,
                        response_class=JSONResponse,
                        methods=["GET"],
                    ),
                    APIRoute(
                        "/tokenize",
                        self.tokenize,
                        response_model=ServeTokenizeResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                    APIRoute(
                        "/detokenize",
                        self.detokenize,
                        response_model=ServeDeTokenizeResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                    APIRoute(
                        "/forward",
                        self.forward,
                        response_model=ServeForwardResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                ],
                timeout=600,
            )

    def run(self):
        run(self._app, host=self.host, port=self.port, workers=self.workers)

    def model_info(self):
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config))

    def tokenize(self, text_input: str = Body(None, embed=True), return_ids: bool = Body(False, embed=True)):
        """
        Tokenize the provided input:
        - **text_input**: String to tokenize
        - **return_ids**: Boolean flag indicating if the tokens have to be converted to their integer mapping.
        """
        try:
            tokens_txt = self._pipeline.tokenizer.tokenize(text_input)

            if return_ids:
                tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt)
                return ServeTokenizeResult(tokens=tokens_txt, tokens_ids=tokens_ids)
            else:
                return ServeTokenizeResult(tokens=tokens_txt)

        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    def detokenize(
        self,
        tokens_ids: List[int] = Body(None, embed=True),
        skip_special_tokens: bool = Body(False, embed=True),
        cleanup_tokenization_spaces: bool = Body(True, embed=True),
    ):
        """Detokenize the provided token ids into readable text."""
        try:
            decoded_str = self._pipeline.tokenizer.decode(tokens_ids, skip_special_tokens, cleanup_tokenization_spaces)
            return ServeDeTokenizeResult(model="", text=decoded_str)
        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    async def forward(self, inputs=Body(None, embed=True)):
        # Check we don't have empty string
        if len(inputs) == 0:
            return ServeForwardResult(output=[], attention=[])

        try:
            # Forward through the model
            output = self._pipeline(inputs)
            return ServeForwardResult(output=output)
        except Exception as e:
            raise HTTPException(500, {"error": str(e)})
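With the server running, the /tokenize route can be exercised from a plain HTTP client. The sketch below is an assumption for illustration: the host, port, payload, and the availability of the requests package are not fixed by the code above.

def query_tokenize(host="localhost", port=8888, text="Hello world"):
    # Hypothetical client call for the /tokenize route defined above;
    # Body(..., embed=True) means each field is a key in the JSON payload.
    import requests  # assumed to be installed; not a dependency of the server module

    resp = requests.post(
        f"http://{host}:{port}/tokenize",
        json={"text_input": text, "return_ids": True},
        timeout=10,
    )
    return resp.json()  # e.g. {"tokens": [...], "tokens_ids": [...]}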
| 81 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path

import torch_xla.distributed.xla_multiprocessing as xmp


def parse_args():
    """Helper function parsing the command line options."""
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch "
            "helper utility that will spawn up "
            "multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
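To make the sys.argv patching concrete, the sketch below reproduces it in isolation. The script name and flags are hypothetical placeholders, not values from the launcher itself:

def _show_argv_patch() -> None:
    # Illustration only; "train.py" and its flags are made-up example values.
    import sys

    original = list(sys.argv)
    sys.argv = ["train.py", "--lr", "1e-4", "--tpu_num_cores", "8"]
    print(sys.argv)  # this is what the imported module's own ArgumentParser would see
    sys.argv = original  # restore the real arguments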
| 81 | 1 |
"""simple docstring"""
from random import randint, random
def _snake_case ( lowercase__ , lowercase__ , lowercase__ , lowercase__ = False , lowercase__ = False , lowercase__ = 5 , ):
_lowerCamelCase : Tuple = [[-1] * number_of_cells] # Create a highway without any car
_lowerCamelCase : Any = 0
_lowerCamelCase : str = max(lowercase__ , 0 )
while i < number_of_cells:
_lowerCamelCase : Optional[int] = (
randint(0 , lowercase__ ) if random_speed else initial_speed
) # Place the cars
i += (
randint(1 , max_speed * 2 ) if random_frequency else frequency
) # Arbitrary number, may need tuning
return highway
def _snake_case ( lowercase__ , lowercase__ ):
_lowerCamelCase : Optional[int] = 0
_lowerCamelCase : Optional[int] = highway_now[car_index + 1 :]
for cell in range(len(lowercase__ ) ): # May need a better name for this
if cells[cell] != -1: # If the cell is not empty then
return distance # we have the distance we wanted
distance += 1
# Here if the car is near the end of the highway
return distance + get_distance(lowercase__ , -1 )
def _snake_case ( lowercase__ , lowercase__ , lowercase__ ):
_lowerCamelCase : int = len(lowercase__ )
# Beforce calculations, the highway is empty
_lowerCamelCase : Any = [-1] * number_of_cells
for car_index in range(lowercase__ ):
if highway_now[car_index] != -1:
# Add 1 to the current speed of the car and cap the speed
_lowerCamelCase : int = min(highway_now[car_index] + 1 , lowercase__ )
# Number of empty cell before the next car
_lowerCamelCase : int = get_distance(lowercase__ , lowercase__ ) - 1
# We can't have the car causing an accident
_lowerCamelCase : Union[str, Any] = min(next_highway[car_index] , lowercase__ )
if random() < probability:
# Randomly, a driver will slow down
_lowerCamelCase : Optional[int] = max(next_highway[car_index] - 1 , 0 )
return next_highway
def _snake_case ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
_lowerCamelCase : Tuple = len(highway[0] )
for i in range(lowercase__ ):
_lowerCamelCase : Dict = update(highway[i] , lowercase__ , lowercase__ )
_lowerCamelCase : Any = [-1] * number_of_cells
for car_index in range(lowercase__ ):
_lowerCamelCase : List[str] = next_speeds_calculated[car_index]
if speed != -1:
# Change the position based on the speed (with % to create the loop)
_lowerCamelCase : str = (car_index + speed) % number_of_cells
# Commit the change of position
_lowerCamelCase : Optional[Any] = speed
highway.append(lowercase__ )
return highway
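A small usage sketch follows; the parameter values are illustrative assumptions, not taken from the original file:

def _demo() -> None:
    # Build a 100-cell loop with a car every 5 cells at speed 2, run 20 steps.
    highway = construct_highway(number_of_cells=100, frequency=5, initial_speed=2)
    history = simulate(highway, number_of_update=20, probability=0.1, max_speed=5)
    for row in history[-3:]:  # show the last three time steps
        print("".join("." if cell == -1 else str(cell) for cell in row))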
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 96 |
import json
import logging
import os
import sys
from pathlib import Path

import finetune_rag

from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
    TestCasePlus,
    execute_subprocess_async,
    require_ray,
    require_torch_gpu,
    require_torch_multi_gpu,
)


logging.basicConfig(level=logging.DEBUG)

logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


class RagFinetuneExampleTests(TestCasePlus):
    def _create_dummy_data(self, data_dir):
        os.makedirs(data_dir, exist_ok=True)
        contents = {"source": "What is love ?", "target": "life"}
        n_lines = {"train": 12, "val": 2, "test": 2}

        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = "\n".join([contents[field]] * n_lines[split])
                with open(os.path.join(data_dir, f"{split}.{field}"), "w") as f:
                    f.write(content)

    def _run_finetune(self, gpus: int, distributed_retriever: str = "pytorch"):
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir, "output")
        data_dir = os.path.join(tmp_dir, "data")
        self._create_dummy_data(data_dir=data_dir)

        testargs = f"""
            --data_dir {data_dir} \
            --output_dir {output_dir} \
            --model_name_or_path facebook/rag-sequence-base \
            --model_type rag_sequence \
            --do_train \
            --do_predict \
            --n_val -1 \
            --val_check_interval 1.0 \
            --train_batch_size 2 \
            --eval_batch_size 1 \
            --max_source_length 25 \
            --max_target_length 25 \
            --val_max_target_length 25 \
            --test_max_target_length 25 \
            --label_smoothing 0.1 \
            --dropout 0.1 \
            --attention_dropout 0.1 \
            --weight_decay 0.001 \
            --adam_epsilon 1e-08 \
            --max_grad_norm 0.1 \
            --lr_scheduler polynomial \
            --learning_rate 3e-04 \
            --num_train_epochs 1 \
            --warmup_steps 4 \
            --gradient_accumulation_steps 1 \
            --distributed-port 8787 \
            --use_dummy_dataset 1 \
            --distributed_retriever {distributed_retriever} \
        """.split()

        if gpus > 0:
            testargs.append(f"--gpus={gpus}")
            if is_apex_available():
                testargs.append("--fp16")
        else:
            testargs.append("--gpus=0")
            testargs.append("--distributed_backend=ddp_cpu")
            testargs.append("--num_processes=2")

        cmd = [sys.executable, str(Path(finetune_rag.__file__).resolve())] + testargs
        execute_subprocess_async(cmd, env=self.get_env())

        metrics_save_path = os.path.join(output_dir, "metrics.json")
        with open(metrics_save_path) as f:
            result = json.load(f)
        return result

    @require_torch_gpu
    def test_finetune_gpu(self):
        result = self._run_finetune(gpus=1)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    def test_finetune_multigpu(self):
        result = self._run_finetune(gpus=2)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_gpu
    @require_ray
    def test_finetune_gpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    @require_ray
    def test_finetune_multigpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)
| 39 | 0 |
import inspect
import unittest
import warnings
from math import ceil, floor

from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
        MODEL_MAPPING,
        LevitForImageClassification,
        LevitForImageClassificationWithTeacher,
        LevitModel,
    )
    from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import LevitImageProcessor


class LevitConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
class LevitModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 6, 8],
        depths=[2, 3, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return LevitConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            kernel_size=self.kernel_size,
            stride=self.stride,
            padding=self.padding,
            patch_size=self.patch_size,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            depths=self.depths,
            key_dim=self.key_dim,
            drop_path_rate=self.drop_path_rate,
            mlp_ratio=self.mlp_ratio,
            attention_ratio=self.attention_ratio,
            initializer_range=self.initializer_range,
            down_ops=self.down_ops,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = LevitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for _ in range(4):
            height = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1)
            width = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, ceil(height / 4) * ceil(width / 4), self.hidden_sizes[-1]),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = LevitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class LevitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LevitModel,
            "image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = LevitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LevitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Levit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Levit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="Levit does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = len(self.model_tester.depths) + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            image_size = (self.model_tester.image_size, self.model_tester.image_size)
            height, width = image_size[0], image_size[1]
            for _ in range(4):
                height = floor(
                    (
                        (height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1
                )
                width = floor(
                    (
                        (width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1
                )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [
                    height * width,
                    self.model_tester.hidden_sizes[0],
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # LevitForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # LevitForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue

            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]

                    model = model_class(config)
                    model.to(torch_device)
                    model.train()

                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)

                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])

                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])

                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom that something is wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}"
                            )

                    loss.backward()

    @slow
    def test_model_from_pretrained(self):
        for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LevitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class LevitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([1.0448, -0.3745, -1.8317]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
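The tests above repeatedly apply the standard convolution output-size formula, floor((size + 2 * padding - kernel) / stride) + 1, once per stride-2 convolution in LeViT's patch embedding. The sketch below isolates that arithmetic; the default values mirror the model tester above:

from math import floor

def levit_patch_embedding_tokens(image_size: int, kernel: int = 3, stride: int = 2, padding: int = 1) -> int:
    # Apply the conv output-size formula four times, once per stride-2
    # convolution in the patch embedding, then square the result.
    size = image_size
    for _ in range(4):
        size = floor((size + 2 * padding - kernel) / stride) + 1
    return size * size

# levit_patch_embedding_tokens(64) == 16: a 64x64 input reaches the first
# attention block as a 4x4 grid, i.e. 16 tokens, which matches the
# test_hidden_states_output assertion above.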
| 207 |
from collections.abc import Sequence


def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Return the maximum sum over all contiguous subarrays of arr (Kadane's algorithm)."""
    if not arr:
        return 0

    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        # Either extend the running subarray or restart at the current element
        # (or at an empty subarray, when that is allowed).
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)

    return max_sum
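For comparison, the same Kadane scan can be extended to report which slice attains the maximum. This variant is a sketch added for illustration; the original function returns only the sum:

def max_subarray_with_indices(arr):
    # Returns (best_sum, start, end) with end exclusive, so arr[start:end]
    # is a maximum-sum contiguous subarray. Requires a non-empty arr.
    best_sum, best_start, best_end = float("-inf"), 0, 0
    curr_sum, curr_start = 0, 0
    for i, num in enumerate(arr):
        if curr_sum <= 0:
            # A non-positive running sum can never help; restart here.
            curr_sum, curr_start = num, i
        else:
            curr_sum += num
        if curr_sum > best_sum:
            best_sum, best_start, best_end = curr_sum, curr_start, i + 1
    return best_sum, best_start, best_end

# max_subarray_with_indices([-2, 1, -3, 4, -1, 2, 1, -5, 4]) -> (6, 3, 7)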
if __name__ == "__main__":
    from doctest import testmod

    testmod()

    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(f"{max_subarray_sum(nums) = }")
| 207 | 1 |
import numpy as np
import torch
from torch.utils.data import DataLoader

from accelerate.utils.dataclasses import DistributedType


class RegressionDataset:
    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return {"x": self.x[i], "y": self.y[i]}


class RegressionModel4XPU(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a[0] + self.b[0]


class RegressionModel(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a + self.b


def get_dataloaders(accelerator, batch_size: int = 16):
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv", data_files=data_files)
    label_list = datasets["train"].unique("label")

    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length"
        )
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        remove_columns=["sentence1", "sentence2", "label"],
    )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)

    return train_dataloader, eval_dataloader
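A quick usage sketch of the regression pieces above follows; the loss, optimizer, and hyperparameters are illustrative assumptions, not choices made by the original file:

def _train_regression_demo() -> None:
    import torch.nn.functional as F
    from accelerate import Accelerator

    accelerator = Accelerator()
    dataset = RegressionDataset(a=2, b=3, length=64, seed=42)
    dataloader = DataLoader(dataset, batch_size=16)
    model = RegressionModel(a=1, b=1)
    optimizer = torch.optim.SGD(model.parameters(), lr=1e-2)
    # accelerate moves the model and data to the right device and wraps them.
    model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

    for batch in dataloader:
        optimizer.zero_grad()
        loss = F.mse_loss(model(batch["x"]), batch["y"])
        accelerator.backward(loss)
        optimizer.step()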
| 231 |
import argparse

import pytorch_lightning as pl
import torch
from torch import nn

from transformers import LongformerForQuestionAnswering, LongformerModel


class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implemented only because PyTorch Lightning requires it
    def forward(self):
        pass


def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
):
    # load the base longformer model from a model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--longformer_model",
        default=None,
        type=str,
        required=True,
        help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
    )
    parser.add_argument(
        "--longformer_question_answering_ckpt_path",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch Lightning Checkpoint.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_longformer_qa_checkpoint_to_pytorch(
        args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
    )
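A hedged invocation example follows; the script name and paths are hypothetical placeholders, not values fixed by the code above:

# Example command line (illustrative only):
#   python convert_longformer_qa_checkpoint.py \
#       --longformer_model allenai/longformer-base-4096 \
#       --longformer_question_answering_ckpt_path ./checkpoints/epoch_2.ckpt \
#       --pytorch_dump_folder_path ./longformer-base-4096-finetuned-qa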
| 231 | 1 |
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow


if is_torch_available():
    import torch

    from transformers import XLMRobertaModel


@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house

        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house

        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 277 |
import math
import time
from typing import Dict, List, Optional

from torch.utils.data import Dataset

from transformers import Seq2SeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics


if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met


class QuestionAnsweringSeq2SeqTrainer(Seq2SeqTrainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(
        self,
        eval_dataset: Optional[Dataset] = None,
        eval_examples=None,
        ignore_keys: Optional[List[str]] = None,
        metric_key_prefix: str = "eval",
        **gen_kwargs,
    ) -> Dict[str, float]:
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs["max_length"] = (
            gen_kwargs["max_length"] if gen_kwargs.get("max_length") is not None else self.args.generation_max_length
        )
        gen_kwargs["num_beams"] = (
            gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs

        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node writes the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node logs the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(
        self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test", **gen_kwargs
    ):
        self._gen_kwargs = gen_kwargs.copy()

        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics

        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)

        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
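The metric-prefixing idiom that both methods apply can be seen in isolation below; the metric names and values are made up for illustration:

# Standalone illustration of the key-prefixing loop used above.
metrics = {"exact_match": 81.2, "f1": 88.9, "eval_samples_per_second": 10.0}
for key in list(metrics.keys()):
    if not key.startswith("eval_"):
        metrics[f"eval_{key}"] = metrics.pop(key)
print(metrics)  # {'eval_samples_per_second': 10.0, 'eval_exact_match': 81.2, 'eval_f1': 88.9}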
| 277 | 1 |
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional

import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter

from transformers import HfArgumentParser


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)


@dataclass
class PlotArguments:
    csv_file: str = field(
        metadata={"help": "The csv file to plot."},
    )
    plot_along_batch: bool = field(
        default=False,
        metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."},
    )
    is_time: bool = field(
        default=False,
        metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."},
    )
    no_log_scale: bool = field(
        default=False,
        metadata={"help": "Disable logarithmic scale when plotting"},
    )
    is_train: bool = field(
        default=False,
        metadata={
            "help": "Whether the csv file has training results or inference results. Defaults to inference results."
        },
    )
    figure_png_file: Optional[str] = field(
        default=None,
        metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."},
    )
    short_model_names: Optional[List[str]] = list_field(
        default=None, metadata={"help": "List of model names that are used instead of the ones in the csv file."}
    )


def can_convert_to_int(string):
    try:
        int(string)
        return True
    except ValueError:
        return False


def can_convert_to_float(string):
    try:
        float(string)
        return True
    except ValueError:
        return False


class Plot:
    def __init__(self, args):
        self.args = args
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}})

        with open(self.args.csv_file, newline="") as csv_file:
            reader = csv.DictReader(csv_file)
            for row in reader:
                model_name = row["model"]
                self.result_dict[model_name]["bsz"].append(int(row["batch_size"]))
                self.result_dict[model_name]["seq_len"].append(int(row["sequence_length"]))
                if can_convert_to_int(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = int(row["result"])
                elif can_convert_to_float(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = float(row["result"])

    def plot(self):
        fig, ax = plt.subplots()
        title_str = "Time usage" if self.args.is_time else "Memory usage"
        title_str = title_str + " for training" if self.args.is_train else title_str + " for inference"

        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale("log")
            ax.set_yscale("log")

        for axis in [ax.xaxis, ax.yaxis]:
            axis.set_major_formatter(ScalarFormatter())

        for model_name_idx, model_name in enumerate(self.result_dict.keys()):
            batch_sizes = sorted(set(self.result_dict[model_name]["bsz"]))
            sequence_lengths = sorted(set(self.result_dict[model_name]["seq_len"]))
            results = self.result_dict[model_name]["result"]

            (x_axis_array, inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )

            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )

            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results],
                        dtype=int,
                    )
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results],
                        dtype=np.float32,
                    )

                (x_axis_label, inner_loop_label) = (
                    ("batch_size", "len") if self.args.plot_along_batch else ("in #tokens", "bsz")
                )

                x_axis_array = np.asarray(x_axis_array, int)[: len(y_axis_array)]
                plt.scatter(
                    x_axis_array, y_axis_array, label=f"{label_model_name} - {inner_loop_label}: {inner_loop_value}"
                )
                plt.plot(x_axis_array, y_axis_array, "--")

            title_str += f" {label_model_name} vs."

        title_str = title_str[:-4]
        y_axis_label = "Time in s" if self.args.is_time else "Memory in MB"

        # plot
        plt.title(title_str)
        plt.xlabel(x_axis_label)
        plt.ylabel(y_axis_label)
        plt.legend()

        if self.args.figure_png_file is not None:
            plt.savefig(self.args.figure_png_file)
        else:
            plt.show()


def main():
    parser = HfArgumentParser(PlotArguments)
    plot_args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=plot_args)
    plot.plot()


if __name__ == "__main__":
    main()
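A hedged invocation example follows; the file names are illustrative placeholders:

# Example command line (illustrative only):
#   python plot_csv_file.py --csv_file inference_time.csv --figure_png_file plot.png --is_time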
| 7 |
"""simple docstring"""
def a__ ( SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
if principal <= 0:
raise Exception("Principal borrowed must be > 0" )
if rate_per_annum < 0:
raise Exception("Rate of interest must be >= 0" )
if years_to_repay <= 0 or not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
raise Exception("Years to repay must be an integer > 0" )
# Yearly rate is divided by 12 to get monthly rate
lowerCAmelCase : Tuple = rate_per_annum / 1_2
# Years to repay is multiplied by 12 to get number of payments as payment is monthly
lowerCAmelCase : List[Any] = years_to_repay * 1_2
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
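A worked example of the formula (with rounded numbers, added here as a sketch rather than a doctest from the original file):

# Borrowing 25,000 at 12% per annum over 3 years:
#   rate_per_month = 0.12 / 12 = 0.01, number_of_payments = 36
#   EMI = 25000 * 0.01 * 1.01**36 / (1.01**36 - 1) which is roughly 830.36
# i.e. equated_monthly_installments(25000, 0.12, 3) returns approximately 830.36.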
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 108 | 0 |
"""simple docstring"""
from __future__ import annotations
from statistics import mean
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
_a : Optional[Any] = [0] * no_of_processes
_a : str = [0] * no_of_processes
# Initialize remaining_time to waiting_time.
for i in range(lowercase__ ):
_a : List[Any] = burst_time[i]
_a : list[int] = []
_a : Optional[int] = 0
_a : List[str] = 0
# When processes are not completed,
# A process whose arrival time has passed \
# and has remaining execution time is put into the ready_process.
# The shortest process in the ready_process, target_process is executed.
while completed != no_of_processes:
_a : List[str] = []
_a : Optional[int] = -1
for i in range(lowercase__ ):
if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
ready_process.append(lowercase__ )
if len(lowercase__ ) > 0:
_a : Any = ready_process[0]
for i in ready_process:
if remaining_time[i] < remaining_time[target_process]:
_a : Any = i
total_time += burst_time[target_process]
completed += 1
_a : Union[str, Any] = 0
_a : Optional[Any] = (
total_time - arrival_time[target_process] - burst_time[target_process]
)
else:
total_time += 1
return waiting_time
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
_a : str = [0] * no_of_processes
for i in range(lowercase__ ):
_a : List[Any] = burst_time[i] + waiting_time[i]
return turn_around_time
if __name__ == "__main__":
print('[TEST CASE 01]')
_snake_case = 4
_snake_case = [2, 5, 3, 7]
_snake_case = [0, 0, 0, 0]
_snake_case = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
_snake_case = calculate_turnaroundtime(
burst_time, no_of_processes, waiting_time
)
# Printing the Result
print('PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time')
for i, process_id in enumerate(list(range(1, 5))):
print(
F'''{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t'''
F'''{waiting_time[i]}\t\t\t\t{turn_around_time[i]}'''
)
print(F'''\nAverage waiting time = {mean(waiting_time):.5f}''')
print(F'''Average turnaround time = {mean(turn_around_time):.5f}''')
| 351 |
"""simple docstring"""
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
_snake_case = logging.getLogger(__name__)
_snake_case = 'pytorch_model.bin'
@dataclasses.dataclass
class STModelArguments:
    model_name_or_path: str = dataclasses.field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models.'} )
    cache_dir: Optional[str] = dataclasses.field(
        default=None , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co.'} , )


@dataclasses.dataclass
class STDataArguments:
    train_file: str = dataclasses.field(metadata={'help': 'A csv or a json file containing the training data.'} )
    infer_file: str = dataclasses.field(metadata={'help': 'A csv or a json file containing the data to predict on.'} )
    eval_file: Optional[str] = dataclasses.field(
        default=None , metadata={'help': 'A csv or a json file containing the validation data.'} )
    task_name: Optional[str] = dataclasses.field(
        default=None , metadata={'help': 'The name of the task to train on.'} , )
    label_list: Optional[List[str]] = dataclasses.field(
        default=None , metadata={'help': 'The list of labels for the task.'} )


@dataclasses.dataclass
class STTrainingArguments:
    output_dir: str = dataclasses.field(
        metadata={'help': 'The output directory where the model predictions and checkpoints will be written.'} )
    eval_metric: Optional[str] = dataclasses.field(
        default='accuracy' , metadata={'help': 'The evaluation metric used for the task.'} )
    evaluation_strategy: Optional[str] = dataclasses.field(
        default='no' , metadata={
            'help': 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch"]'
        } , )
    early_stopping_patience: Optional[int] = dataclasses.field(
        default=10 , metadata={'help': 'Number of evaluation calls with no improvement after which training will be stopped.'} , )
    early_stopping_threshold: Optional[float] = dataclasses.field(
        default=0.0 , metadata={
            'help': 'How much the specified evaluation metric must improve to satisfy early stopping conditions.'
        } , )
    do_filter_by_confidence: Optional[bool] = dataclasses.field(
        default=False , metadata={'help': 'Whether to filter the pseudo-labeled data based on the confidence score.'} , )
    do_filter_by_val_performance: Optional[bool] = dataclasses.field(
        default=False , metadata={'help': 'Whether to filter the pseudo-labeled data based on the validation performance.'} , )
    finetune_on_labeled_data: Optional[bool] = dataclasses.field(
        default=False , metadata={'help': 'Whether to fine-tune on labeled data after pseudo training.'} , )
    confidence_threshold: Optional[float] = dataclasses.field(
        default=0.0 , metadata={'help': 'Confidence threshold for pseudo-labeled data filtering.'} , )
    max_selftrain_iterations: Optional[int] = dataclasses.field(
        default=100 , metadata={'help': 'Maximum number of self-training iterations.'} , )
    seed: Optional[int] = dataclasses.field(
        default=None , metadata={'help': 'Random seed for initialization.'} , )
def create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir):
    """Create pseudo-labeled data for the next self-training iteration."""
    dataset = datasets.concatenate_datasets([infer_input, infer_output] , axis=1 )

    if args.do_filter_by_confidence:
        dataset = dataset.filter(lambda example: example["probability"] > args.confidence_threshold )

    if args.do_filter_by_val_performance:
        assert eval_result >= 0.0 and eval_result <= 1.0
        num_selected_rows = int(eval_result * len(dataset) )
        print(num_selected_rows)
        dataset = dataset.sort("probability" , reverse=True )
        dataset = dataset.select(range(num_selected_rows) )

    dataset = dataset.remove_columns(["label", "probability"] )
    dataset = dataset.rename_column("prediction" , "label" )
    dataset = dataset.map(lambda example: {"label": id2label[example["label"]]} )
    dataset = dataset.shuffle(seed=args.seed )

    pseudo_labeled_data_file = os.path.join(next_data_dir , F"""train_pseudo.{args.data_file_extension}""" )
    if args.data_file_extension == "csv":
        dataset.to_csv(pseudo_labeled_data_file , index=False )
    else:
        dataset.to_json(pseudo_labeled_data_file )
def selftrain(model_name_or_path, train_file, infer_file, output_dir, **kwargs):
    """Self-training entry point: repeatedly fine-tune and pseudo-label."""
    accelerator = Accelerator()
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , )
    logger.info(accelerator.state )
    # Setup logging, we only want one process per machine to log things on the
    # screen. accelerator.is_local_main_process is only True for one process per
    # machine.
    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )

    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()

    model_args = STModelArguments(model_name_or_path=model_name_or_path )
    data_args = STDataArguments(train_file=train_file , infer_file=infer_file )
    training_args = STTrainingArguments(output_dir=output_dir )
    args = argparse.Namespace()

    for arg_class in (model_args, data_args, training_args):
        for key, value in vars(arg_class ).items():
            setattr(args , key , value )

    for key, value in kwargs.items():
        if hasattr(args , key ):
            setattr(args , key , value )
    # Sanity checks
    data_files = {}
    args.data_file_extension = None

    # You need to provide the training data and the data to predict on
    assert args.train_file is not None
    assert args.infer_file is not None
    data_files["train"] = args.train_file
    data_files["infer"] = args.infer_file

    if args.evaluation_strategy != IntervalStrategy.NO.value:
        assert args.eval_file is not None
        data_files["eval"] = args.eval_file

    for key in data_files:
        extension = data_files[key].split("." )[-1]
        assert extension in ["csv", "json"], F"""`{key}_file` should be a csv or a json file."""
        if args.data_file_extension is None:
            args.data_file_extension = extension
        else:
            assert extension == args.data_file_extension, F"""`{key}_file` should be a {args.data_file_extension} file`."""

    assert (
        args.eval_metric in datasets.list_metrics()
    ), F"""{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."""

    # If passed along, set the training seed now.
    if args.seed is not None:
        set_seed(args.seed )

    logger.info("Creating the initial data directory for self-training..." )
    data_dir_format = F"""{args.output_dir}/self-train_iter-{{}}""".format
    initial_data_dir = data_dir_format(0 )

    if accelerator.is_main_process:
        if args.output_dir is not None:
            os.makedirs(args.output_dir , exist_ok=True )
        os.makedirs(initial_data_dir , exist_ok=True )
    accelerator.wait_for_everyone()

    best_iteration = None
    best_eval_result = None
    early_stopping_patience_counter = 0
    should_training_stop = False
    # Show the progress bar
    progress_bar = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations ) ):
_a : Union[str, Any] = data_dir_format(UpperCamelCase__ )
assert os.path.exists(UpperCamelCase__ )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
_a : str = os.path.join(UpperCamelCase__ , """stage-1""" )
_a : Tuple = {
"""accelerator""": accelerator,
"""model_name_or_path""": args.model_name_or_path,
"""cache_dir""": args.cache_dir,
"""do_train""": True,
"""train_file""": data_files["""train"""] if iteration == 0 else data_files["""train_pseudo"""],
"""do_eval""": True if args.eval_file is not None else False,
"""eval_file""": data_files["""eval"""],
"""do_predict""": True,
"""infer_file""": data_files["""infer"""],
"""task_name""": args.task_name,
"""label_list""": args.label_list,
"""output_dir""": current_output_dir,
"""eval_metric""": args.eval_metric,
"""evaluation_strategy""": args.evaluation_strategy,
"""early_stopping_patience""": args.early_stopping_patience,
"""early_stopping_threshold""": args.early_stopping_threshold,
"""seed""": args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(UpperCamelCase__ , UpperCamelCase__ ):
arguments_dict.update({key: value} )
_a : int = os.path.join(UpperCamelCase__ , """best-checkpoint""" , UpperCamelCase__ )
if os.path.exists(UpperCamelCase__ ):
logger.info(
"""Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.""" , UpperCamelCase__ , UpperCamelCase__ , )
else:
logger.info("""***** Running self-training: iteration: %d, stage: 1 *****""" , UpperCamelCase__ )
finetune(**UpperCamelCase__ )
accelerator.wait_for_everyone()
assert os.path.exists(UpperCamelCase__ )
logger.info("""Self-training job completed: iteration: %d, stage: 1.""" , UpperCamelCase__ )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
_a : Dict = os.path.join(UpperCamelCase__ , """best-checkpoint""" )
_a : List[str] = os.path.join(UpperCamelCase__ , """stage-2""" )
# Update arguments_dict
_a : int = model_path
_a : Dict = data_files["""train"""]
_a : int = current_output_dir
_a : Any = os.path.join(UpperCamelCase__ , """best-checkpoint""" , UpperCamelCase__ )
if os.path.exists(UpperCamelCase__ ):
logger.info(
"""Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.""" , UpperCamelCase__ , UpperCamelCase__ , )
else:
logger.info("""***** Running self-training: iteration: %d, stage: 2 *****""" , UpperCamelCase__ )
finetune(**UpperCamelCase__ )
accelerator.wait_for_everyone()
assert os.path.exists(UpperCamelCase__ )
logger.info("""Self-training job completed: iteration: %d, stage: 2.""" , UpperCamelCase__ )
_a : List[Any] = iteration
_a : int = data_dir_format(iteration + 1 )
_a : Dict = AutoConfig.from_pretrained(os.path.join(UpperCamelCase__ , """best-checkpoint""" ) )
_a : Union[str, Any] = config.idalabel
_a : Any = os.path.join(UpperCamelCase__ , """eval_results_best-checkpoint.json""" )
_a : Any = os.path.join(UpperCamelCase__ , """test_results_best-checkpoint.json""" )
assert os.path.exists(UpperCamelCase__ )
with open(UpperCamelCase__ , """r""" ) as f:
_a : Tuple = float(json.load(UpperCamelCase__ )[args.eval_metric] )
_a : Dict = os.path.join(UpperCamelCase__ , """infer_output_best-checkpoint.csv""" )
assert os.path.exists(UpperCamelCase__ )
# Loading the dataset from local csv or json files.
_a : List[Any] = load_dataset(args.data_file_extension , data_files={"""data""": data_files["""infer"""]} )["""data"""]
_a : Any = load_dataset("""csv""" , data_files={"""data""": infer_output_file} )["""data"""]
if accelerator.is_main_process:
os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ )
shutil.copy(UpperCamelCase__ , os.path.join(UpperCamelCase__ , F"""eval_results_iter-{iteration}.json""" ) )
if os.path.exists(UpperCamelCase__ ):
shutil.copy(UpperCamelCase__ , os.path.join(UpperCamelCase__ , F"""test_results_iter-{iteration}.json""" ) )
create_pseudo_labeled_data(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
accelerator.wait_for_everyone()
_a : List[str] = os.path.join(UpperCamelCase__ , F"""train_pseudo.{args.data_file_extension}""" )
if args.evaluation_strategy != IntervalStrategy.NO.value:
_a : Any = eval_result
if best_iteration is None:
_a : Union[str, Any] = new_iteration
_a : str = new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
_a : Union[str, Any] = new_iteration
_a : List[str] = new_eval_result
_a : Optional[Any] = 0
else:
if new_eval_result == best_eval_result:
_a : Tuple = new_iteration
_a : List[Any] = new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
_a : Union[str, Any] = True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info("""Best iteration: %d""" , UpperCamelCase__ )
logger.info("""Best evaluation result: %s = %f""" , args.eval_metric , UpperCamelCase__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(UpperCamelCase__ , F"""eval_results_iter-{iteration}.json""" ) , os.path.join(UpperCamelCase__ , """eval_results_best-iteration.json""" ) , )
else:
# Assume that the last iteration is the best
logger.info("""Best iteration: %d""" , args.max_selftrain_iterations - 1 )
logger.info("""Best evaluation result: %s = %f""" , args.eval_metric , UpperCamelCase__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(UpperCamelCase__ , F"""eval_results_iter-{args.max_selftrain_iterations - 1}.json""" ) , os.path.join(UpperCamelCase__ , """eval_results_best-iteration.json""" ) , )
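# Illustrative driver for the `selftrain` entry point defined above (paths and
# model name are hypothetical; extra keyword arguments are forwarded to each
# fine-tuning stage via `arguments_dict`):
#
#   selftrain(
#       model_name_or_path="bert-base-uncased",
#       train_file="data/train.csv",
#       infer_file="data/infer.csv",
#       output_dir="output/self-training",
#       evaluation_strategy="epoch",
#       eval_file="data/eval.csv",
#   )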
| 324 | 0 |
'''simple docstring'''
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNet2DModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
__a = logging.get_logger(__name__) # pylint: disable=invalid-name
def _preprocess_image(image):
    warnings.warn(
        "The preprocess method is deprecated and will be removed in a future version. Please"
        " use VaeImageProcessor.preprocess instead" , FutureWarning , )
    if isinstance(image , torch.Tensor ):
        return image
    elif isinstance(image , PIL.Image.Image ):
        image = [image]

    if isinstance(image[0] , PIL.Image.Image ):
        w, h = image[0].size
        w, h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8

        image = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] ) )[None, :] for i in image]
        image = np.concatenate(image , axis=0 )
        image = np.array(image ).astype(np.float32 ) / 255.0
        image = image.transpose(0 , 3 , 1 , 2 )
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image )
    elif isinstance(image[0] , torch.Tensor ):
        image = torch.cat(image , dim=0 )
    return image
def _preprocess_mask(mask):
    if isinstance(mask , torch.Tensor ):
        return mask
    elif isinstance(mask , PIL.Image.Image ):
        mask = [mask]

    if isinstance(mask[0] , PIL.Image.Image ):
        w, h = mask[0].size
        w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
        mask = [np.array(m.convert("L" ).resize((w, h) , resample=PIL_INTERPOLATION["nearest"] ) )[None, :] for m in mask]
        mask = np.concatenate(mask , axis=0 )
        mask = mask.astype(np.float32 ) / 255.0
        # binarize the mask
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask )
    elif isinstance(mask[0] , torch.Tensor ):
        mask = torch.cat(mask , dim=0 )
    return mask
class RePaintPipeline(DiffusionPipeline):
    """Pipeline for image inpainting using the RePaint resampling schedule."""

    unet: UNet2DModel
    scheduler: RePaintScheduler

    def __init__(self , unet , scheduler ):
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )

    @torch.no_grad()
    def __call__(
        self ,
        image: Union[torch.Tensor, PIL.Image.Image] ,
        mask_image: Union[torch.Tensor, PIL.Image.Image] ,
        num_inference_steps: int = 250 ,
        eta: float = 0.0 ,
        jump_length: int = 10 ,
        jump_n_sample: int = 10 ,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None ,
        output_type: Optional[str] = "pil" ,
        return_dict: bool = True ,
    ):
        original_image = image
        original_image = _preprocess_image(original_image )
        original_image = original_image.to(device=self.device , dtype=self.unet.dtype )
        mask_image = _preprocess_mask(mask_image )
        mask_image = mask_image.to(device=self.device , dtype=self.unet.dtype )

        batch_size = original_image.shape[0]

        # sample gaussian noise to begin the loop
        if isinstance(generator , list ) and len(generator ) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator )}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators." )

        image_shape = original_image.shape
        image = randn_tensor(image_shape , generator=generator , device=self.device , dtype=self.unet.dtype )

        # set step values
        self.scheduler.set_timesteps(num_inference_steps , jump_length , jump_n_sample , self.device )
        self.scheduler.eta = eta

        t_last = self.scheduler.timesteps[0] + 1
        generator = generator[0] if isinstance(generator , list ) else generator
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
            if t < t_last:
                # predict the noise residual
                model_output = self.unet(image , t ).sample
                # compute previous image: x_t -> x_t-1
                image = self.scheduler.step(model_output , t , image , original_image , mask_image , generator ).prev_sample
            else:
                # compute the reverse: x_t-1 -> x_t
                image = self.scheduler.undo_step(image , t_last , generator )
            t_last = t

        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image )
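# Minimal usage sketch for the pipeline above (the checkpoint name is
# illustrative; RePaint is typically paired with a DDPM UNet such as
# "google/ddpm-ema-celebahq-256" and a RePaintScheduler):
#
#   from diffusers import RePaintPipeline, RePaintScheduler
#   scheduler = RePaintScheduler.from_pretrained("google/ddpm-ema-celebahq-256")
#   pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256", scheduler=scheduler)
#   output = pipe(image=original_image, mask_image=mask, num_inference_steps=250)
#   inpainted = output.images[0]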
| 35 |
'''simple docstring'''
import numpy as np
from transformers import Pipeline
def softmax(outputs):
    maxes = np.max(outputs , axis=-1 , keepdims=True )
    shifted_exp = np.exp(outputs - maxes )
    return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=True )
class PairClassificationPipeline(Pipeline):
    def _sanitize_parameters(self , **kwargs ):
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["second_text"] = kwargs["second_text"]
        return preprocess_kwargs, {}, {}

    def preprocess(self , text , second_text=None ):
        return self.tokenizer(text , text_pair=second_text , return_tensors=self.framework )

    def _forward(self , model_inputs ):
        return self.model(**model_inputs )

    def postprocess(self , model_outputs ):
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits )

        best_class = np.argmax(probabilities )
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
| 35 | 1 |
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]


def try_key(ciphertext: list[int] , key: tuple[int, ...] ) -> str | None:
    """Decode the ciphertext with the given repeating key; return None if any
    decoded character falls outside the set of valid characters."""
    decoded = ""
    keychar: int
    cipherchar: int
    decodedchar: int

    for keychar, cipherchar in zip(cycle(key ) , ciphertext ):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar )

    return decoded


def filter_valid_chars(ciphertext: list[int] ) -> list[str]:
    """Try every three-lowercase-letter key and keep fully valid decodings."""
    possibles = []
    for key in product(LOWERCASE_INTS , repeat=3 ):
        encoded = try_key(ciphertext , key )
        if encoded is not None:
            possibles.append(encoded )
    return possibles


def filter_common_word(possibles: list[str] , common_word: str ) -> list[str]:
    """Keep only the candidate decodings that contain the given common word."""
    return [possible for possible in possibles if common_word in possible.lower()]


def solution(filename: str = "p059_cipher.txt" ) -> int:
    """Project Euler 59: sum of the ASCII values of the decrypted text."""
    ciphertext: list[int]
    possibles: list[str]
    common_word: str
    decoded_text: str
    data = Path(__file__ ).parent.joinpath(filename ).read_text(encoding="utf-8" )

    ciphertext = [int(number ) for number in data.strip().split("," )]

    possibles = filter_valid_chars(ciphertext )
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles , common_word )
        if len(possibles ) == 1:
            break

    decoded_text = possibles[0]
    return sum(ord(char ) for char in decoded_text )
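# Tiny round-trip check for try_key: XOR-ing with the same key twice is the
# identity, so encrypting a plaintext and decoding with the right key recovers it:
#
#   ciphertext = [ord(c) ^ ord(k) for c, k in zip("the cat", cycle("abc"))]
#   assert try_key(ciphertext, (ord("a"), ord("b"), ord("c"))) == "the cat"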
if __name__ == "__main__":
print(F'{solution() = }') | 363 |
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_addoption(parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports) | 317 | 0 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class DeformableDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self ,
        parent ,
        batch_size=7 ,
        num_channels=3 ,
        min_resolution=30 ,
        max_resolution=400 ,
        do_resize=True ,
        size=None ,
        do_normalize=True ,
        image_mean=[0.5, 0.5, 0.5] ,
        image_std=[0.5, 0.5, 0.5] ,
        do_rescale=True ,
        rescale_factor=1 / 255 ,
        do_pad=True ,
    ):
        size = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to instantiate the image processor under test."""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self , image_inputs , batched=False ):
        """Compute the expected (height, width) after resizing, mirroring the processor logic."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['shortest_edge'] * h / w )
                expected_width = self.size['shortest_edge']
            elif w > h:
                expected_height = self.size['shortest_edge']
                expected_width = int(self.size['shortest_edge'] * w / h )
            else:
                expected_height = self.size['shortest_edge']
                expected_width = self.size['shortest_edge']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item : item[0] )[0]
            expected_width = max(expected_values , key=lambda item : item[1] )[1]

        return expected_height, expected_width
@require_torch
@require_vision
class DeformableDetrImageProcessingTest(ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DeformableDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , 'image_mean' ) )
        self.assertTrue(hasattr(image_processing , 'image_std' ) )
        self.assertTrue(hasattr(image_processing , 'do_normalize' ) )
        self.assertTrue(hasattr(image_processing , 'do_resize' ) )
        self.assertTrue(hasattr(image_processing , 'do_rescale' ) )
        self.assertTrue(hasattr(image_processing , 'do_pad' ) )
        self.assertTrue(hasattr(image_processing , 'size' ) )
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 1333} )
        self.assertEqual(image_processor.do_pad , True )

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=False )
        self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} )
        self.assertEqual(image_processor.do_pad , False )

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )

        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )

        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )

        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )

        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )

        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
        with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
            target = json.loads(f.read() )

        target = {'image_id': 39769, 'annotations': target}

        # encode them
        image_processing = DeformableDetrImageProcessor()
        encoding = image_processing(images=image , annotations=target , return_tensors='pt' )

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066] )
        self.assertEqual(encoding['pixel_values'].shape , expected_shape )
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481] )
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , expected_slice , atol=1e-4 ) )
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , expected_area ) )
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4] )
        self.assertEqual(encoding['labels'][0]['boxes'].shape , expected_boxes_shape )
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , expected_boxes_slice , atol=1e-3 ) )
        # verify image_id
        expected_image_id = torch.tensor([39769] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , expected_image_id ) )
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , expected_is_crowd ) )
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , expected_class_labels ) )
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , expected_orig_size ) )
        # verify size
        expected_size = torch.tensor([800, 1066] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , expected_size ) )
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
        with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
            target = json.loads(f.read() )

        target = {'file_name': '000000039769.png', 'image_id': 39769, 'segments_info': target}

        masks_path = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )

        # encode them
        image_processing = DeformableDetrImageProcessor(format='coco_panoptic' )
        encoding = image_processing(images=image , annotations=target , masks_path=masks_path , return_tensors='pt' )

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066] )
        self.assertEqual(encoding['pixel_values'].shape , expected_shape )
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481] )
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , expected_slice , atol=1e-4 ) )
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , expected_area ) )
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4] )
        self.assertEqual(encoding['labels'][0]['boxes'].shape , expected_boxes_shape )
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , expected_boxes_slice , atol=1e-3 ) )
        # verify image_id
        expected_image_id = torch.tensor([39769] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , expected_image_id ) )
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , expected_is_crowd ) )
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , expected_class_labels ) )
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding['labels'][0]['masks'].sum().item() , expected_masks_sum )
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , expected_orig_size ) )
        # verify size
        expected_size = torch.tensor([800, 1066] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , expected_size ) )
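# Usage sketch outside the test harness (the checkpoint name is illustrative):
#
#   from transformers import DeformableDetrImageProcessor
#   processor = DeformableDetrImageProcessor.from_pretrained("SenseTime/deformable-detr")
#   inputs = processor(images=image, return_tensors="pt")  # resizes, rescales, normalizes, pads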
| 141 |
'''simple docstring'''
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging


logger = logging.get_logger(__name__)


def copy_layers(src_layers: nn.ModuleList, dest_layers: nn.ModuleList, layers_to_copy: List[int] ) -> None:
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy] )
    assert len(dest_layers ) == len(layers_to_copy ), f"{len(dest_layers )} != {len(layers_to_copy )}"
    dest_layers.load_state_dict(layers_to_copy.state_dict() )
LAYERS_TO_COPY = {
    # maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
    # 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
    12: {
        1: [0],  # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
        2: [0, 6],
        3: [0, 6, 11],
        4: [0, 4, 8, 11],
        6: [0, 2, 4, 7, 9, 11],
        9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
        12: list(range(12)),
    },
    16: {  # maps num layers in student -> which teacher layers to copy
        1: [0],
        2: [0, 15],
        3: [0, 8, 15],
        4: [0, 5, 10, 15],
        6: [0, 3, 6, 9, 12, 15],
        8: [0, 2, 4, 6, 8, 10, 12, 15],
        9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
        12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
        16: list(range(16)),
    },
    6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
LAYERS_TO_SUPERVISE = {
    # maps num layers in student -> which teacher layers to copy.
    6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
    12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
    16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def pick_layers_to_copy(n_student, n_teacher):
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                f"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
                f" {n_student}" )
        return list(range(n_student ) )


def get_layers_to_supervise(n_student, n_teacher ) -> List[int]:
    """Used for the --supervise_forward kwarg."""
    if n_student > n_teacher:
        raise ValueError(f"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}" )
    elif n_teacher == n_student:
        return list(range(n_teacher ) )
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def create_student_by_copying_alternating_layers(
    teacher: Union[str, PreTrainedModel],
    save_path: Union[str, Path] = "student",
    e: Union[int, None] = None,
    d: Union[int, None] = None,
    copy_first_teacher_layers=False,
    e_layers_to_copy=None,
    d_layers_to_copy=None,
    **extra_config_kwargs,
) -> Tuple[PreTrainedModel, List[int], List[int]]:
    _msg = 'encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher.'
    assert (e is not None) or (d is not None), _msg
    if isinstance(teacher, str ):
        AutoTokenizer.from_pretrained(teacher ).save_pretrained(save_path )  # purely for convenience
        teacher = AutoModelForSeq2SeqLM.from_pretrained(teacher ).eval()
    else:
        assert isinstance(teacher, PreTrainedModel ), f"teacher must be a model or string got type {type(teacher )}"
    init_kwargs = teacher.config.to_diff_dict()

    try:
        teacher_e, teacher_d = teacher.config.encoder_layers, teacher.config.decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        init_kwargs.update({'encoder_layers': e, 'decoder_layers': d} )
    except AttributeError:  # T5
        if hasattr(teacher.config, 'num_encoder_layers' ):
            teacher_e, teacher_d = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
        else:
            teacher_e, teacher_d = teacher.config.num_layers, teacher.config.num_decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        if hasattr(teacher.config, 'num_encoder_layers' ):
            init_kwargs.update({'num_encoder_layers': e, 'num_decoder_layers': d} )
        else:
            init_kwargs.update({'num_layers': e, 'num_decoder_layers': d} )

    # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
    init_kwargs.update(extra_config_kwargs )

    # Copy weights
    student_cfg = teacher.config_class(**init_kwargs )
    student = AutoModelForSeq2SeqLM.from_config(student_cfg )
    # Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
    info = student.load_state_dict(teacher.state_dict(), strict=False )
    assert info.missing_keys == [], info.missing_keys  # every student key should have a teacher keys.

    if copy_first_teacher_layers:  # Our copying is done. We just log and save
        e_layers_to_copy, d_layers_to_copy = list(range(e ) ), list(range(d ) )
        logger.info(
            f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
            f" {save_path}" )
        student.save_pretrained(save_path )
        return student, e_layers_to_copy, d_layers_to_copy

    # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
    if e_layers_to_copy is None:
        e_layers_to_copy: List[int] = pick_layers_to_copy(e, teacher_e )
    if d_layers_to_copy is None:
        d_layers_to_copy: List[int] = pick_layers_to_copy(d, teacher_d )

    try:
        if hasattr(
            teacher, 'prophetnet' ):  # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
            copy_layers(teacher.prophetnet.encoder.layers, student.prophetnet.encoder.layers, e_layers_to_copy )
            copy_layers(teacher.prophetnet.decoder.layers, student.prophetnet.decoder.layers, d_layers_to_copy )
        else:
            copy_layers(teacher.model.encoder.layers, student.model.encoder.layers, e_layers_to_copy )
            copy_layers(teacher.model.decoder.layers, student.model.decoder.layers, d_layers_to_copy )
    except AttributeError:  # For t5, student.model.encoder.layers is called student.encoder.block
        copy_layers(teacher.encoder.block, student.encoder.block, e_layers_to_copy )
        copy_layers(teacher.decoder.block, student.decoder.block, d_layers_to_copy )
    logger.info(
        f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}" )
    student.config.init_metadata = {
        'teacher_type': teacher.config.model_type,
        'copied_encoder_layers': e_layers_to_copy,
        'copied_decoder_layers': d_layers_to_copy,
    }
    student.save_pretrained(save_path )
    # Save information about copying for easier reproducibility
    return student, e_layers_to_copy, d_layers_to_copy
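# Example invocation (mirrors how this script is typically driven via fire; the
# checkpoint name and layer counts are illustrative):
#
#   student, e_copied, d_copied = create_student_by_copying_alternating_layers(
#       "facebook/bart-large-cnn", save_path="student-6-6", e=6, d=6
#   )
#   # e_copied == d_copied == [0, 2, 4, 7, 9, 11] per LAYERS_TO_COPY[12][6]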
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers) | 152 | 0 |
from __future__ import annotations
RADIX = 10
def radix_sort(list_of_ints: list[int]) -> list[int]:
    """Least-significant-digit radix sort; sorts in place and returns the list."""
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each buckets' contents into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to next
        placement *= RADIX
    return list_of_ints
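# Quick sanity check (mirrors the classic radix-sort example):
#
#   assert radix_sort([170, 45, 75, 90, 802, 24, 2, 66]) == [2, 24, 45, 66, 75, 90, 170, 802]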
if __name__ == "__main__":
import doctest
doctest.testmod()
| 78 | class CircularQueue:
    """Circular FIFO queue with a fixed capacity, backed by a Python list."""

    def __init__(self, n: int):
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        """Return the front element, or False if the queue is empty."""
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception('QUEUE IS FULL')
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception('UNDERFLOW')
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
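# Usage sketch:
#
#   q = CircularQueue(3)
#   q.enqueue(1).enqueue(2)          # enqueue returns self, so calls can be chained
#   assert q.first() == 1 and len(q) == 2
#   assert q.dequeue() == 1 and q.dequeue() == 2 and q.is_empty()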
| 78 | 1 |
'''simple docstring'''
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'OwlViTImageProcessor'
    tokenizer_class = ('CLIPTokenizer', 'CLIPTokenizerFast')

    def __init__(self , image_processor=None , tokenizer=None , **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
                """ instead.""" , FutureWarning , )
            feature_extractor = kwargs.pop("""feature_extractor""")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("""You need to specify an `image_processor`.""")
        if tokenizer is None:
            raise ValueError("""You need to specify a `tokenizer`.""")

        super().__init__(image_processor , tokenizer)
    def __call__(self , text=None , images=None , query_images=None , padding="max_length" , return_tensors="np" , **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                """You have to specify at least one text or query image or image. All three cannot be none.""")

        if text is not None:
            if isinstance(text , str) or (isinstance(text , List) and not isinstance(text[0] , List)):
                encodings = [self.tokenizer(text , padding=padding , return_tensors=return_tensors , **kwargs)]
            elif isinstance(text , List) and isinstance(text[0] , List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [""" """] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t , padding=padding , return_tensors=return_tensors , **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("""Input text should be a string, a list of strings or a nested list of strings""")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0)
                attention_mask = np.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0)
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0)
                attention_mask = jnp.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0)
            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["""input_ids"""] for encoding in encodings] , dim=0)
                attention_mask = torch.cat([encoding["""attention_mask"""] for encoding in encodings] , dim=0)
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["""input_ids"""] for encoding in encodings] , axis=0)
                attention_mask = tf.stack([encoding["""attention_mask"""] for encoding in encodings] , axis=0)
            else:
                raise ValueError("""Target return tensor type could not be returned""")

            encoding = BatchEncoding()
            encoding["""input_ids"""] = input_ids
            encoding["""attention_mask"""] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images , return_tensors=return_tensors , **kwargs).pixel_values
            encoding["""query_pixel_values"""] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs)

        if text is not None and images is not None:
            encoding["""pixel_values"""] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["""pixel_values"""] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features) , tensor_type=return_tensors)
    def post_process(self , *args , **kwargs):
        return self.image_processor.post_process(*args , **kwargs)

    def post_process_object_detection(self , *args , **kwargs):
        return self.image_processor.post_process_object_detection(*args , **kwargs)

    def post_process_image_guided_detection(self , *args , **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args , **kwargs)

    def batch_decode(self , *args , **kwargs):
        return self.tokenizer.batch_decode(*args , **kwargs)

    def decode(self , *args , **kwargs):
        return self.tokenizer.decode(*args , **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , FutureWarning , )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , FutureWarning , )
        return self.image_processor | 163 |
'''simple docstring'''
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None

log_levels = {
    'debug': logging.DEBUG,
    'info': logging.INFO,
    'warning': logging.WARNING,
    'error': logging.ERROR,
    'critical': logging.CRITICAL,
}

_default_log_level = logging.WARNING

_tqdm_active = True
def _get_default_logging_level():
    """
    If the TRANSFORMERS_VERBOSITY env var is set to one of the valid choices, return that as the new default level;
    otherwise fall back to `_default_log_level`.
    """
    env_level_str = os.getenv("""TRANSFORMERS_VERBOSITY""" , None )
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f'''Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, '''
                f'''has to be one of: { ', '.join(log_levels.keys() ) }''' )
    return _default_log_level


def _get_library_name() -> str:
    return __name__.split(""".""" )[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name() )
def _configure_library_root_logger() -> None:
    global _default_handler

    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush

        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler )
        library_root_logger.setLevel(_get_default_logging_level() )
        library_root_logger.propagate = False


def _reset_library_root_logger() -> None:
    global _default_handler

    with _lock:
        if not _default_handler:
            return

        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler )
        library_root_logger.setLevel(logging.NOTSET )
        _default_handler = None


def get_log_levels_dict():
    return log_levels
def get_logger(name: Optional[str] = None ) -> logging.Logger:
    if name is None:
        name = _get_library_name()

    _configure_library_root_logger()
    return logging.getLogger(name )


def get_verbosity() -> int:
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int ) -> None:
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity )


def set_verbosity_info():
    return set_verbosity(INFO )


def set_verbosity_warning():
    return set_verbosity(WARNING )


def set_verbosity_debug():
    return set_verbosity(DEBUG )


def set_verbosity_error():
    return set_verbosity(ERROR )
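# Usage sketch (illustrative, not part of this module): consumers typically do
#
#   from transformers.utils import logging
#   logging.set_verbosity_info()
#   logger = logging.get_logger("transformers")
#   logger.info("INFO")
#
# The `set_verbosity_*` helpers above simply forward the standard logging levels
# to the library root logger configured by `_configure_library_root_logger`.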
def disable_default_handler() -> None:
    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler )


def enable_default_handler() -> None:
    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler )


def add_handler(handler: logging.Handler ) -> None:
    _configure_library_root_logger()

    assert handler is not None
    _get_library_root_logger().addHandler(handler )


def remove_handler(handler: logging.Handler ) -> None:
    _configure_library_root_logger()

    assert handler is not None and handler not in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler )


def disable_propagation() -> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True


def enable_explicit_format() -> None:
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        formatter = logging.Formatter("""[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s""" )
        handler.setFormatter(formatter )


def reset_format() -> None:
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        handler.setFormatter(None )


def warning_advice(self , *args , **kwargs ):
    no_advisory_warnings = os.getenv("""TRANSFORMERS_NO_ADVISORY_WARNINGS""" , False )
    if no_advisory_warnings:
        return
    self.warning(*args , **kwargs )


logging.Logger.warning_advice = warning_advice


@functools.lru_cache(None )
def warning_once(self , *args , **kwargs ):
    self.warning(*args , **kwargs )


logging.Logger.warning_once = warning_once
class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self , *args , **kwargs ):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self ):
        return iter(self._iterator )

    def __getattr__(self , _ ):
        """Return empty function."""

        def empty_fn(*args , **kwargs ):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self ):
        return self

    def __exit__(self , type_ , value , traceback ):
        return


class _tqdm_cls:
    def __call__(self , *args , **kwargs ):
        if _tqdm_active:
            return tqdm_lib.tqdm(*args , **kwargs )
        else:
            return EmptyTqdm(*args , **kwargs )

    def set_lock(self , *args , **kwargs ):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args , **kwargs )

    def get_lock(self ):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()


def is_progress_bar_enabled() -> bool:
    global _tqdm_active
    return bool(_tqdm_active )


def enable_progress_bars() -> None:
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()


def disable_progress_bars() -> None:
    global _tqdm_active
    _tqdm_active = False
    hf_hub_utils.disable_progress_bars() | 163 | 1 |
"""simple docstring"""
import math
from datetime import datetime, timedelta
def gauss_easter(year: int) -> datetime:
    """Calculate the Gregorian Easter date for a given year using Gauss's algorithm."""
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100 )
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25 )
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year , 4 , 19 )
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year , 4 , 18 )
    else:
        return datetime(year , 3 , 22 ) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday ) )
if __name__ == "__main__":
for year in (1994, 2000, 2010, 2021, 2023):
__snake_case = """will be""" if year > datetime.now().year else """was"""
print(F'''Easter in {year} {tense} {gauss_easter(year)}''')
| 352 |
"""simple docstring"""
def sum_of_divisors(input_num: int) -> int:
    """Return the sum of the proper divisors of ``input_num``."""
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )
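# Worked example (using the name as defined above): the proper divisors of 12
# are 1, 2, 3, 4 and 6, so
#
#   assert sum_of_divisors(12) == 16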
if __name__ == "__main__":
import doctest
doctest.testmod()
| 112 | 0 |
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
logger = logging.get_logger(__name__)


def rename_key(key):
    regex = r'''\w+[.]\d+'''
    pats = re.findall(regex , key )
    for pat in pats:
        key = key.replace(pat , '''_'''.join(pat.split('''.''' ) ) )
    return key
def rename_key_and_reshape_tensor(pt_tuple_key , pt_tensor , random_flax_state_dict ):
    """Rename PT weight names to corresponding Flax weight names and reshape the tensor if necessary."""
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''scale''',)
    if (
        any('''norm''' in str_ for str_ in pt_tuple_key )
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''scale''',)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''scale''',)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''embedding''',)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''kernel''',)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2 , 3 , 1 , 0 )
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''kernel''',)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''weight''',)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''bias''',)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=42 ):
'''simple docstring'''
__UpperCamelCase :Union[str, Any] = {k: v.numpy() for k, v in pt_state_dict.items()}
# Step 2: Since the model is stateless, get random Flax params
__UpperCamelCase :str = flax_model.init_weights(PRNGKey(SCREAMING_SNAKE_CASE ) )
__UpperCamelCase :int = flatten_dict(SCREAMING_SNAKE_CASE )
__UpperCamelCase :List[Any] = {}
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
__UpperCamelCase :List[Any] = rename_key(SCREAMING_SNAKE_CASE )
__UpperCamelCase :List[Any] = tuple(renamed_pt_key.split('''.''' ) )
# Correctly rename weight parameters
__UpperCamelCase , __UpperCamelCase :Any = rename_key_and_reshape_tensor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
f"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
# also add unexpected weight so that warning is thrown
__UpperCamelCase :str = jnp.asarray(SCREAMING_SNAKE_CASE )
return unflatten_dict(SCREAMING_SNAKE_CASE )
| 43 | import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImg2ImgPipeline, UNet2DConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
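
# Fast tests for the stable unCLIP image-to-image pipeline. The dummy
# components below keep every sub-model tiny so the whole pipeline can run on
# CPU inside a unit test.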
class StableUnCLIPImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # image encoding components
        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)

        torch.manual_seed(0)
        image_encoder = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=embedder_hidden_size, projection_dim=embedder_projection_dim, num_hidden_layers=5, num_attention_heads=4, image_size=32, intermediate_size=37, patch_size=1, ))

        # regular denoising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ))

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type="projection", projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=embedder_hidden_size, layers_per_block=1, upcast_attention=True, use_linear_projection=True, )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, prediction_type="v_prediction", set_alpha_to_one=False, steps_offset=1, )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # image encoding components
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder.eval(),
            # image noising components
            "image_normalizer": image_normalizer.eval(),
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder.eval(),
            "unet": unet.eval(),
            "scheduler": scheduler,
            "vae": vae.eval(),
        }

        return components
    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]

        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }
    @skip_mps
    def test_image_embeds_none(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableUnCLIPImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs.update({"image_embeds": None})
        image = sd_pipe(**inputs).images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device in ["cpu", "mps"]

        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]

        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=False)
@slow
@require_torch_gpu
class StableUnCLIPImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip_l_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png")

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy")

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-l-img2img", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turtle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_h_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png")

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy")

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turtle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_img2img_pipeline_with_sequential_cpu_offloading(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png")

        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            input_image, "anime turtle", num_inference_steps=2, output_type="np", )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 43 | 1 |
"""simple docstring"""
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class _A ( _a ):
"""simple docstring"""
def __snake_case ( self : int , __UpperCAmelCase : float):
return 0.0
def lowercase ( A_ , A_ )-> tuple[int | float, int | float]:
'''simple docstring'''
a : Tuple = min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] )
a : List[str] = max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] )
return lowest, highest
def lowercase ( A_ , A_ )-> None:
'''simple docstring'''
a : List[str] = 512
a : Optional[Any] = [1] + [0] * (size - 1)
a : Tuple = [filter_type.process(A_ ) for item in inputs]
a : Optional[int] = [0] * (samplerate - size) # zero-padding
outputs += filler
a : int = np.abs(np.fft.fft(A_ ) )
a : Dict = 20 * np.logaa(A_ )
# Frequencies on log scale from 24 to nyquist frequency
plt.xlim(24 , samplerate / 2 - 1 )
plt.xlabel("Frequency (Hz)" )
plt.xscale("log" )
# Display within reasonable bounds
a : Union[str, Any] = get_bounds(A_ , A_ )
plt.ylim(max([-80, bounds[0]] ) , min([80, bounds[1]] ) )
plt.ylabel("Gain (dB)" )
plt.plot(A_ )
plt.show()
def lowercase ( A_ , A_ )-> None:
'''simple docstring'''
a : Dict = 512
a : str = [1] + [0] * (size - 1)
a : Any = [filter_type.process(A_ ) for item in inputs]
a : Tuple = [0] * (samplerate - size) # zero-padding
outputs += filler
a : Optional[Any] = np.angle(np.fft.fft(A_ ) )
# Frequencies on log scale from 24 to nyquist frequency
plt.xlim(24 , samplerate / 2 - 1 )
plt.xlabel("Frequency (Hz)" )
plt.xscale("log" )
plt.ylim(-2 * pi , 2 * pi )
plt.ylabel("Phase shift (Radians)" )
plt.plot(np.unwrap(A_ , -2 * pi ) )
plt.show()
| 369 |
"""simple docstring"""
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
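
# Convert an original TensorFlow TAPAS checkpoint (plus its JSON config) into a
# transformers PyTorch checkpoint. The ``task`` flag decides which task-specific
# head is instantiated on top of the base model.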
def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    # Initialise the PyTorch model: the configuration comes from the JSON file.
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell

    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True

        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513

        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False

        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141

        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f"Task {task} not supported.")

    print(f"Building PyTorch model from configuration: {config}")

    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)

    # Save pytorch-model (weights and configuration)
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Save tokenizer files
    print(f"Save tokenizer files to {pytorch_dump_path}")
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)

    print("Used relative position embeddings:", model.config.reset_position_index_per_cell)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--task""", default="""SQA""", type=str, help="""Model task for which to convert a checkpoint. Defaults to SQA."""
)
parser.add_argument(
"""--reset_position_index_per_cell""",
default=False,
action="""store_true""",
help="""Whether to use relative position embeddings or not. Defaults to True.""",
)
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--tapas_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained TAPAS model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
| 226 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CVT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/cvt-13''': '''https://huggingface.co/microsoft/cvt-13/resolve/main/config.json''',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
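
# Configuration class for CvT (Convolutional vision Transformer). CvT is a
# three-stage architecture, so most of the hyperparameters below are lists with
# one entry per stage.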
class CvtConfig(PretrainedConfig):
    model_type = "cvt"

    def __init__(self, num_channels=3, patch_sizes=[7, 3, 3], patch_stride=[4, 2, 2], patch_padding=[2, 1, 1], embed_dim=[64, 192, 384], num_heads=[1, 3, 6], depth=[1, 2, 10], mlp_ratio=[4.0, 4.0, 4.0], attention_drop_rate=[0.0, 0.0, 0.0], drop_rate=[0.0, 0.0, 0.0], drop_path_rate=[0.0, 0.0, 0.1], qkv_bias=[True, True, True], cls_token=[False, False, True], qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"], kernel_qkv=[3, 3, 3], padding_kv=[1, 1, 1], stride_kv=[2, 2, 2], padding_q=[1, 1, 1], stride_q=[1, 1, 1], initializer_range=0.02, layer_norm_eps=1e-12, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
| 87 |
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"},
"merges_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"ctrl": 2_56,
}
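
# CTRL conditions generation on a control code that is prepended to the prompt;
# the mapping below goes from human-readable code name to its vocabulary id.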
CONTROL_CODES = {
"Pregnancy": 16_86_29,
"Christianity": 76_75,
"Explain": 10_64_23,
"Fitness": 6_34_40,
"Saving": 6_31_63,
"Ask": 2_71_71,
"Ass": 9_59_85,
"Joke": 16_35_09,
"Questions": 4_56_22,
"Thoughts": 4_96_05,
"Retail": 5_23_42,
"Feminism": 16_43_38,
"Writing": 1_19_92,
"Atheism": 19_22_63,
"Netflix": 4_86_16,
"Computing": 3_96_39,
"Opinion": 4_32_13,
"Alone": 4_49_67,
"Funny": 5_89_17,
"Gaming": 4_03_58,
"Human": 40_88,
"India": 13_31,
"Joker": 7_71_38,
"Diet": 3_62_06,
"Legal": 1_18_59,
"Norman": 49_39,
"Tip": 7_26_89,
"Weight": 5_23_43,
"Movies": 4_62_73,
"Running": 2_34_25,
"Science": 20_90,
"Horror": 3_77_93,
"Confession": 6_05_72,
"Finance": 1_22_50,
"Politics": 1_63_60,
"Scary": 19_19_85,
"Support": 1_26_54,
"Technologies": 3_25_16,
"Teenage": 6_61_60,
"Event": 3_27_69,
"Learned": 6_74_60,
"Notion": 18_27_70,
"Wikipedia": 3_75_83,
"Books": 66_65,
"Extract": 7_60_50,
"Confessions": 10_27_01,
"Conspiracy": 7_59_32,
"Links": 6_36_74,
"Narcissus": 15_04_25,
"Relationship": 5_47_66,
"Relationships": 13_47_96,
"Reviews": 4_16_71,
"News": 42_56,
"Translation": 2_68_20,
"multilingual": 12_84_06,
}
def get_pairs(word):
    """Return the set of symbol pairs in a word, where a word is a tuple of symbols
    (symbols being variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
class CTRLTokenizer(PreTrainedTokenizer):
    """Construct a CTRL tokenizer, based on Byte-Pair-Encoding."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES

    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
        super().__init__(unk_token=unk_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # merge the lowest-ranked bigram first
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        """Tokenize a string."""
        split_tokens = []

        words = re.findall(r"\S+\n?", text)

        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!")
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
| 252 | 0 |
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
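
# Tests for the PNDM scheduler: it warms up with pseudo Runge-Kutta (PRK)
# steps and then switches to pseudo linear multistep (PLMS) steps, which is
# why the tests below exercise both step_prk and step_plms.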
class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(10)
        assert torch.equal(
            scheduler.timesteps,
            torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]),
        )

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps)

    def test_pow_of_3_inference_steps(self):
        # earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
        num_inference_steps = 27

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(num_inference_steps)

            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2]):
                sample = scheduler.step_prk(residual, t, sample).prev_sample

    def test_inference_plms_no_past_residuals(self):
        with self.assertRaises(ValueError):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 198.1318) < 1e-2
        assert abs(result_mean.item() - 0.2580) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 67.3986) < 1e-2
        assert abs(result_mean.item() - 0.0878) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 230.0399) < 1e-2
        assert abs(result_mean.item() - 0.2995) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 186.9482) < 1e-2
        assert abs(result_mean.item() - 0.2434) < 1e-3
| 366 | def all_unique_chars(input_str: str) -> bool:
    """
    Return True if every character in ``input_str`` occurs at most once,
    using a bitmap over the characters' unicode code points.

    >>> all_unique_chars("abcde")
    True
    >>> all_unique_chars("abcda")
    False
    """
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)

        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod() | 284 | 0 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
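
# Configuration class for DPT (Dense Prediction Transformer). In hybrid mode a
# BiT backbone supplies low-level feature maps to the ViT encoder.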
logger = logging.get_logger(__name__)

DPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Intel/dpt-large": "https://huggingface.co/Intel/dpt-large/resolve/main/config.json",
    # See all DPT models at https://huggingface.co/models?filter=dpt
}
class DPTConfig(PretrainedConfig):
    model_type = "dpt"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=384, patch_size=16, num_channels=3, is_hybrid=False, qkv_bias=True, backbone_out_indices=[2, 5, 8, 11], readout_type="project", reassemble_factors=[4, 2, 1, 0.5], neck_hidden_sizes=[96, 192, 384, 768], fusion_hidden_size=256, head_in_index=-1, use_batch_norm_in_fusion_residual=False, use_auxiliary_head=True, auxiliary_loss_weight=0.4, semantic_loss_ignore_index=255, semantic_classifier_dropout=0.1, backbone_featmap_shape=[1, 1024, 24, 24], neck_ignore_stages=[0, 1], backbone_config=None, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid

        if self.is_hybrid:
            if backbone_config is None:
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = {
                    "global_padding": "same",
                    "layer_type": "bottleneck",
                    "depths": [3, 4, 9],
                    "out_features": ["stage1", "stage2", "stage3"],
                    "embedding_dynamic_padding": True,
                }
                self.backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, dict):
                logger.info("Initializing the config with a `BiT` backbone.")
                self.backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, PretrainedConfig):
                self.backbone_config = backbone_config
            else:
                raise ValueError(
                    f"backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.")

            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages

            if readout_type != "project":
                raise ValueError("Readout type must be 'project' when using `DPT-hybrid` mode.")
        else:
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []

        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices

        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']")
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual

        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)

        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()

        output["model_type"] = self.__class__.model_type
        return output
| 106 |
import collections
import importlib.util
import os
import re
from pathlib import Path
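
# Consistency checker for transformers' lazy-import __init__.py files: the
# `_import_structure` dictionary and the TYPE_CHECKING branch must declare
# exactly the same objects for every backend.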
SCREAMING_SNAKE_CASE__ = """src/transformers"""
# Matches is_xxx_available()
SCREAMING_SNAKE_CASE__ = re.compile(r"""is\_([a-z_]*)_available()""")
# Catches a one-line _import_struct = {xxx}
SCREAMING_SNAKE_CASE__ = re.compile(r"""^_import_structure\s+=\s+\{([^\}]+)\}""")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
SCREAMING_SNAKE_CASE__ = re.compile(r"""\s+\"\S*\":\s+\[([^\]]*)\]""")
# Catches a line if not is_foo_available
SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""")
# Catches a line _import_struct["bla"].append("foo")
SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""")
# Catches a line with an object between quotes and a comma: "MyModel",
SCREAMING_SNAKE_CASE__ = re.compile("""^\s+\"([^\"]+)\",""")
# Catches a line with objects between brackets only: ["foo", "bar"],
SCREAMING_SNAKE_CASE__ = re.compile("""^\s+\[([^\]]+)\]""")
# Catches a line with from foo import bar, bla, boo
SCREAMING_SNAKE_CASE__ = re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
# Catches a line with try:
SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*try:""")
# Catches a line with else:
SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*else:""")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """
    Read an init file and parse (per backend) the `_import_structure` objects defined and the `TYPE_CHECKING` objects
    defined.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """
    Analyze the differences between _import_structure objects and TYPE_CHECKING objects found in an init.
    """

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    """
    Check all inits in the repo and raise an error if at least one does not define the same objects in both halves.
    """
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """
    Returns the list of transformers submodules.
    """
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]
def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.")
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 325 | 0 |
"""Detect whether a singly linked list contains a loop."""
from __future__ import annotations

from typing import Any


class ContainsLoopError(Exception):
    pass


class Node:
    def __init__(self, data: Any) -> None:
        self.data: Any = data
        self.next_node: Node | None = None

    def __iter__(self):
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node

    @property
    def has_loop(self) -> bool:
        try:
            list(self)
            return False
        except ContainsLoopError:
            return True


if __name__ == "__main__":
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop)  # False
    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop)  # True

    root_node = Node(5)
    root_node.next_node = Node(6)
    root_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop)  # False

    root_node = Node(1)
    print(root_node.has_loop)  # False
| 5 |
'''simple docstring'''
import comet # From: unbabel-comet
import torch
import datasets
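
# `datasets` metric wrapper around Unbabel's COMET models: given sources,
# hypotheses and references, it returns segment-level scores plus their mean.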
logger = datasets.logging.get_logger(__name__)
__lowerCAmelCase = """\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel's Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = \"{COMET}: A Neural Framework for {MT} Evaluation\",
author = \"Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon\",
booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",
month = nov,
year = \"2020\",
address = \"Online\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",
pages = \"2685--2702\",
}
"""
__lowerCAmelCase = """\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).
With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
"""
__lowerCAmelCase = """
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.
Returns:
`samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
`scores`: List of scores.
Examples:
>>> comet_metric = datasets.load_metric('comet')
>>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use
>>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]
>>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]
>>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results[\"scores\"]])
[0.19, 0.92]
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class COMET(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://unbabel.github.io/COMET/html/index.html",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "sources": datasets.Value("string", id="sequence"),
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/Unbabel/COMET"],
            reference_urls=[
                "https://github.com/Unbabel/COMET",
                "https://www.aclweb.org/anthology/2020.emnlp-main.213/",
                "http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da"))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))

    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {"src": sources, "mt": predictions, "ref": references}
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        scores, mean_score = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
| 5 | 1 |
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
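
# Tests for BarkProcessor: tokenizer save/load round-trips and handling of
# speaker-embedding "voice preset" prompts (in memory, from an .npz file, and
# from the Hub).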
@require_torch
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)
        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())

    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }
        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())
        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())
        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)
        encoded_processor = processor(text=self.input_string)
        # boolean flags below are representative values, assumed for this rewrite
        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
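# For reference, the npz round-trip exercised by `test_speaker_embeddings` can be
# reproduced standalone; a minimal sketch (toy shapes, assuming the same
# `BarkProcessor` API used above):
#
#   preset = {"semantic_prompt": np.ones(35),
#             "coarse_prompt": np.ones((2, 35)),
#             "fine_prompt": np.ones((8, 35))}
#   np.savez("preset.npz", **preset)
#   inputs = processor(text="hello", voice_preset="preset.npz")
#   history = inputs["history_prompt"]  # the same arrays, keyed as above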
| 38 |
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 6_5_0, "eval_accuracy": 0.7, "eval_loss": 0.6},
},
{
"framework": "pytorch",
"script": "run_ddp.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 6_0_0, "eval_accuracy": 0.7, "eval_loss": 0.6},
},
{
"framework": "tensorflow",
"script": "run_tf_dist.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 6_0_0, "eval_accuracy": 0.6, "eval_loss": 0.7},
},
] )
class lowerCAmelCase ( unittest.TestCase ):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        job_name = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"
        # distributed data settings
        distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=job_name,
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(2,)])
    def test_script(self, instance_count):
        estimator = self.create_estimator(instance_count)
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)
        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
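# The KPI extraction above reads SageMaker's metric stream; a sketch of the
# regex definitions that `self.env.metric_definitions` typically carries
# (names and patterns are illustrative, not taken from this file):
#
#   metric_definitions = [
#       {"Name": "eval_accuracy", "Regex": "'eval_accuracy': ([0-9.]+)"},
#       {"Name": "eval_loss", "Regex": "'eval_loss': ([0-9.]+)"},
#   ]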
| 141 | 0 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class UpperCamelCase_(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
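# A minimal sketch of how such a template is applied (the `prepare_for_task`
# call is illustrative of the datasets task API, not defined in this file):
_template = UpperCamelCase_(question_column="q", context_column="ctx", answers_column="ans")
assert _template.column_mapping == {"q": "question", "ctx": "context", "ans": "answers"}
# `dataset.prepare_for_task(_template)` would rename the columns accordingly.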
| 357 |
"""simple docstring"""
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
RANDOM_BERT = "hf-internal-testing/tiny-random-bert"
CACHE_DIR = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert")
FULL_COMMIT_HASH = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6"


class UpperCamelCase_(unittest.TestCase):
    def test_cached_file(self):
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        # Should have downloaded the file in here
        self.assertTrue(os.path.isdir(CACHE_DIR))
        # Cache should contain at least those three subfolders:
        for subfolder in ["blobs", "refs", "snapshots"]:
            self.assertTrue(os.path.isdir(os.path.join(CACHE_DIR, subfolder)))
        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", main_commit, CONFIG_NAME))
        self.assertTrue(os.path.isfile(archive_file))
        # File is cached at the same place the second time.
        new_archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        self.assertEqual(archive_file, new_archive_file)
        # Using a specific revision to test the full commit hash.
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME, revision="9b8c223")
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", FULL_COMMIT_HASH, CONFIG_NAME))

    def test_cached_file_errors(self):
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            _ = cached_file("tiny-random-bert", CONFIG_NAME)
        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            _ = cached_file(RANDOM_BERT, CONFIG_NAME, revision="aaaa")
        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")

    def test_non_existence_is_cached(self):
        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")
        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertTrue(os.path.isfile(os.path.join(CACHE_DIR, ".no_exist", main_commit, "conf")))
        path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)
        path = cached_file(RANDOM_BERT, "conf", local_files_only=True, _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_connection_errors=False)
            self.assertIsNone(path)
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_has_file(self):
        self.assertTrue(has_file("hf-internal-testing/tiny-bert-pt-only", WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", TF2_WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", FLAX_WEIGHTS_NAME))

    def test_get_file_from_repo_distant(self):
        # `get_file_from_repo` returns None if the file does not exist
        self.assertIsNone(get_file_from_repo("bert-base-cased", "ahah.txt"))
        # The function raises if the repository does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            get_file_from_repo("bert-base-case", CONFIG_NAME)
        # The function raises if the revision does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            get_file_from_repo("bert-base-cased", CONFIG_NAME, revision="ahaha")
        resolved_file = get_file_from_repo("bert-base-cased", CONFIG_NAME)
        # The name is the cached name which is not very easy to test, so instead we load the content.
        config = json.loads(open(resolved_file, "r").read())
        self.assertEqual(config["hidden_size"], 768)

    def test_get_file_from_repo_local(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            filename = Path(tmp_dir) / "a.txt"
            filename.touch()
            self.assertEqual(get_file_from_repo(tmp_dir, "a.txt"), str(filename))
            self.assertIsNone(get_file_from_repo(tmp_dir, "b.txt"))
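# The private flags exercised above make `cached_file` return None instead of
# raising; a sketch of the fallback behaviour (repo name reused from the tests):
#
#   path = cached_file("hf-internal-testing/tiny-random-bert", "missing.txt",
#                      _raise_exceptions_for_missing_entries=False)
#   path is None  # True: the miss is also recorded under the repo's `.no_exist` cache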
| 253 | 0 |
'''simple docstring'''
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
UpperCAmelCase : Tuple = {
'bart': (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'bert': (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-base-cased-finetuned-mrpc': (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'dpr': (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'gpt2': (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlnet': (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlm': (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlm-roberta': (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'transfo-xl': (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'openai-gpt': (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'roberta': (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'layoutlm': (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'roberta-large-mnli': (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'camembert': (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'flaubert': (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'distilbert': (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'distilbert-base-distilled-squad': (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'lxmert': (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'lxmert-visual-feature-encoder': (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'ctrl': (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'albert': (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
't5': (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'electra': (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'wav2vec2': (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def convert_pt_checkpoint_to_tf(
    model_type, pytorch_checkpoint_path, config_file, tf_dump_path, compare_with_pt_model=False, use_cached_models=True
):
    if model_type not in MODEL_CLASSES:
        raise ValueError(f"Unrecognized model type, should be one of {list(MODEL_CLASSES.keys())}.")

    config_class, model_class, pt_model_class, aws_config_map = MODEL_CLASSES[model_type]

    # Initialise TF model
    if config_file in aws_config_map:
        config_file = cached_file(config_file, CONFIG_NAME, force_download=not use_cached_models)
    config = config_class.from_json_file(config_file)
    config.output_hidden_states = True
    config.output_attentions = True
    print(f"Building TensorFlow model from configuration: {config}")
    tf_model = model_class(config)

    # Load weights from tf checkpoint
    if pytorch_checkpoint_path in aws_config_map.keys():
        pytorch_checkpoint_path = cached_file(
            pytorch_checkpoint_path, WEIGHTS_NAME, force_download=not use_cached_models
        )
    # Load PyTorch checkpoint in tf2 model:
    tf_model = load_pytorch_checkpoint_in_tfa_model(tf_model, pytorch_checkpoint_path)

    if compare_with_pt_model:
        tfo = tf_model(tf_model.dummy_inputs, training=False)  # build the network

        state_dict = torch.load(pytorch_checkpoint_path, map_location="cpu")
        pt_model = pt_model_class.from_pretrained(
            pretrained_model_name_or_path=None, config=config, state_dict=state_dict
        )

        with torch.no_grad():
            pto = pt_model(**pt_model.dummy_inputs)

        np_pt = pto[0].numpy()
        np_tf = tfo[0].numpy()
        diff = np.amax(np.abs(np_pt - np_tf))
        print(f"Max absolute difference between models outputs {diff}")
        assert diff <= 2e-2, f"Error, model absolute difference is >2e-2: {diff}"

    # Save pytorch-model
    print(f"Save TensorFlow model to {tf_dump_path}")
    tf_model.save_weights(tf_dump_path, save_format="h5")
def convert_all_pt_checkpoints_to_tf(
    args_model_type,
    tf_dump_path,
    model_shortcut_names_or_path=None,
    config_shortcut_names_or_path=None,
    compare_with_pt_model=False,
    use_cached_models=False,
    remove_cached_files=False,
    only_convert_finetuned_models=False,
):
    if args_model_type is None:
        model_types = list(MODEL_CLASSES.keys())
    else:
        model_types = [args_model_type]

    for j, model_type in enumerate(model_types, start=1):
        print("=" * 100)
        print(f" Converting model type {j}/{len(model_types)}: {model_type}")
        print("=" * 100)
        if model_type not in MODEL_CLASSES:
            raise ValueError(f"Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys())}.")

        config_class, model_class, pt_model_class, aws_model_maps, aws_config_map = MODEL_CLASSES[model_type]

        if model_shortcut_names_or_path is None:
            model_shortcut_names_or_path = list(aws_model_maps.keys())
        if config_shortcut_names_or_path is None:
            config_shortcut_names_or_path = model_shortcut_names_or_path

        for i, (model_shortcut_name, config_shortcut_name) in enumerate(
            zip(model_shortcut_names_or_path, config_shortcut_names_or_path), start=1
        ):
            print("-" * 100)
            if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
                if not only_convert_finetuned_models:
                    print(f"    Skipping finetuned checkpoint {model_shortcut_name}")
                    continue
                model_type = model_shortcut_name
            elif only_convert_finetuned_models:
                print(f"    Skipping not finetuned checkpoint {model_shortcut_name}")
                continue
            print(
                f"    Converting checkpoint {i}/{len(model_shortcut_names_or_path)}: {model_shortcut_name} - model_type {model_type}"
            )
            print("-" * 100)

            if config_shortcut_name in aws_config_map:
                config_file = cached_file(config_shortcut_name, CONFIG_NAME, force_download=not use_cached_models)
            else:
                config_file = config_shortcut_name

            if model_shortcut_name in aws_model_maps:
                model_file = cached_file(model_shortcut_name, WEIGHTS_NAME, force_download=not use_cached_models)
            else:
                model_file = model_shortcut_name

            if os.path.isfile(model_shortcut_name):
                model_shortcut_name = "converted_model"

            convert_pt_checkpoint_to_tf(
                model_type=model_type,
                pytorch_checkpoint_path=model_file,
                config_file=config_file,
                tf_dump_path=os.path.join(tf_dump_path, model_shortcut_name + "-tf_model.h5"),
                compare_with_pt_model=compare_with_pt_model,
            )
            if remove_cached_files:
                os.remove(config_file)
                os.remove(model_file)
if __name__ == "__main__":
UpperCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_dump_path', default=None, type=str, required=True, help='Path to the output Tensorflow dump file.'
)
parser.add_argument(
'--model_type',
default=None,
type=str,
help=(
f"""Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and """
'convert all the models from AWS.'
),
)
parser.add_argument(
'--pytorch_checkpoint_path',
default=None,
type=str,
help=(
'Path to the PyTorch checkpoint path or shortcut name to download from AWS. '
'If not given, will download and convert all the checkpoints from AWS.'
),
)
parser.add_argument(
'--config_file',
default=None,
type=str,
help=(
'The config json file corresponding to the pre-trained model. \n'
'This specifies the model architecture. If not given and '
'--pytorch_checkpoint_path is not given or is a shortcut name '
'use the configuration associated to the shortcut name on the AWS'
),
)
parser.add_argument(
'--compare_with_pt_model', action='store_true', help='Compare Tensorflow and PyTorch model predictions.'
)
parser.add_argument(
'--use_cached_models',
action='store_true',
help='Use cached models if possible instead of updating to latest checkpoint versions.',
)
parser.add_argument(
'--remove_cached_files',
action='store_true',
help='Remove pytorch models after conversion (save memory when converting in batches).',
)
parser.add_argument('--only_convert_finetuned_models', action='store_true', help='Only convert finetuned models.')
UpperCAmelCase : List[Any] = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
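# Example invocation of this conversion script (entry point and paths are
# illustrative; the real module path depends on where the script is installed):
#
#   python convert_pytorch_checkpoint_to_tf2.py \
#       --model_type bert \
#       --pytorch_checkpoint_path ./pt/pytorch_model.bin \
#       --config_file ./pt/config.json \
#       --tf_dump_path ./tf_model.h5 \
#       --compare_with_pt_model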
| 267 |
'''simple docstring'''
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
UpperCAmelCase : Tuple = {
'bart': (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'bert': (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-base-cased-finetuned-mrpc': (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'dpr': (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'gpt2': (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlnet': (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlm': (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlm-roberta': (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'transfo-xl': (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'openai-gpt': (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'roberta': (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'layoutlm': (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'roberta-large-mnli': (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'camembert': (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'flaubert': (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'distilbert': (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'distilbert-base-distilled-squad': (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'lxmert': (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'lxmert-visual-feature-encoder': (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'ctrl': (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'albert': (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
't5': (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'electra': (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'wav2vec2': (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def convert_pt_checkpoint_to_tf(
    model_type, pytorch_checkpoint_path, config_file, tf_dump_path, compare_with_pt_model=False, use_cached_models=True
):
    if model_type not in MODEL_CLASSES:
        raise ValueError(f"Unrecognized model type, should be one of {list(MODEL_CLASSES.keys())}.")

    config_class, model_class, pt_model_class, aws_config_map = MODEL_CLASSES[model_type]

    # Initialise TF model
    if config_file in aws_config_map:
        config_file = cached_file(config_file, CONFIG_NAME, force_download=not use_cached_models)
    config = config_class.from_json_file(config_file)
    config.output_hidden_states = True
    config.output_attentions = True
    print(f"Building TensorFlow model from configuration: {config}")
    tf_model = model_class(config)

    # Load weights from tf checkpoint
    if pytorch_checkpoint_path in aws_config_map.keys():
        pytorch_checkpoint_path = cached_file(
            pytorch_checkpoint_path, WEIGHTS_NAME, force_download=not use_cached_models
        )
    # Load PyTorch checkpoint in tf2 model:
    tf_model = load_pytorch_checkpoint_in_tfa_model(tf_model, pytorch_checkpoint_path)

    if compare_with_pt_model:
        tfo = tf_model(tf_model.dummy_inputs, training=False)  # build the network

        state_dict = torch.load(pytorch_checkpoint_path, map_location="cpu")
        pt_model = pt_model_class.from_pretrained(
            pretrained_model_name_or_path=None, config=config, state_dict=state_dict
        )

        with torch.no_grad():
            pto = pt_model(**pt_model.dummy_inputs)

        np_pt = pto[0].numpy()
        np_tf = tfo[0].numpy()
        diff = np.amax(np.abs(np_pt - np_tf))
        print(f"Max absolute difference between models outputs {diff}")
        assert diff <= 2e-2, f"Error, model absolute difference is >2e-2: {diff}"

    # Save pytorch-model
    print(f"Save TensorFlow model to {tf_dump_path}")
    tf_model.save_weights(tf_dump_path, save_format="h5")
def convert_all_pt_checkpoints_to_tf(
    args_model_type,
    tf_dump_path,
    model_shortcut_names_or_path=None,
    config_shortcut_names_or_path=None,
    compare_with_pt_model=False,
    use_cached_models=False,
    remove_cached_files=False,
    only_convert_finetuned_models=False,
):
    if args_model_type is None:
        model_types = list(MODEL_CLASSES.keys())
    else:
        model_types = [args_model_type]

    for j, model_type in enumerate(model_types, start=1):
        print("=" * 100)
        print(f" Converting model type {j}/{len(model_types)}: {model_type}")
        print("=" * 100)
        if model_type not in MODEL_CLASSES:
            raise ValueError(f"Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys())}.")

        config_class, model_class, pt_model_class, aws_model_maps, aws_config_map = MODEL_CLASSES[model_type]

        if model_shortcut_names_or_path is None:
            model_shortcut_names_or_path = list(aws_model_maps.keys())
        if config_shortcut_names_or_path is None:
            config_shortcut_names_or_path = model_shortcut_names_or_path

        for i, (model_shortcut_name, config_shortcut_name) in enumerate(
            zip(model_shortcut_names_or_path, config_shortcut_names_or_path), start=1
        ):
            print("-" * 100)
            if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
                if not only_convert_finetuned_models:
                    print(f"    Skipping finetuned checkpoint {model_shortcut_name}")
                    continue
                model_type = model_shortcut_name
            elif only_convert_finetuned_models:
                print(f"    Skipping not finetuned checkpoint {model_shortcut_name}")
                continue
            print(
                f"    Converting checkpoint {i}/{len(model_shortcut_names_or_path)}: {model_shortcut_name} - model_type {model_type}"
            )
            print("-" * 100)

            if config_shortcut_name in aws_config_map:
                config_file = cached_file(config_shortcut_name, CONFIG_NAME, force_download=not use_cached_models)
            else:
                config_file = config_shortcut_name

            if model_shortcut_name in aws_model_maps:
                model_file = cached_file(model_shortcut_name, WEIGHTS_NAME, force_download=not use_cached_models)
            else:
                model_file = model_shortcut_name

            if os.path.isfile(model_shortcut_name):
                model_shortcut_name = "converted_model"

            convert_pt_checkpoint_to_tf(
                model_type=model_type,
                pytorch_checkpoint_path=model_file,
                config_file=config_file,
                tf_dump_path=os.path.join(tf_dump_path, model_shortcut_name + "-tf_model.h5"),
                compare_with_pt_model=compare_with_pt_model,
            )
            if remove_cached_files:
                os.remove(config_file)
                os.remove(model_file)
if __name__ == "__main__":
UpperCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_dump_path', default=None, type=str, required=True, help='Path to the output Tensorflow dump file.'
)
parser.add_argument(
'--model_type',
default=None,
type=str,
help=(
f"""Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and """
'convert all the models from AWS.'
),
)
parser.add_argument(
'--pytorch_checkpoint_path',
default=None,
type=str,
help=(
'Path to the PyTorch checkpoint path or shortcut name to download from AWS. '
'If not given, will download and convert all the checkpoints from AWS.'
),
)
parser.add_argument(
'--config_file',
default=None,
type=str,
help=(
'The config json file corresponding to the pre-trained model. \n'
'This specifies the model architecture. If not given and '
'--pytorch_checkpoint_path is not given or is a shortcut name '
'use the configuration associated to the shortcut name on the AWS'
),
)
parser.add_argument(
'--compare_with_pt_model', action='store_true', help='Compare Tensorflow and PyTorch model predictions.'
)
parser.add_argument(
'--use_cached_models',
action='store_true',
help='Use cached models if possible instead of updating to latest checkpoint versions.',
)
parser.add_argument(
'--remove_cached_files',
action='store_true',
help='Remove pytorch models after conversion (save memory when converting in batches).',
)
parser.add_argument('--only_convert_finetuned_models', action='store_true', help='Only convert finetuned models.')
UpperCAmelCase : List[Any] = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
| 267 | 1 |
'''simple docstring'''
import numpy as np
def runge_kutta(f, ya, xa, x_end, h):
    '''Classic fourth-order Runge-Kutta integrator for the ODE y' = f(x, y).'''
    n = int(np.ceil((x_end - xa) / h))
    y = np.zeros((n + 1,))
    y[0] = ya
    x = xa
    for k in range(n):
        k1 = f(x, y[k])
        k2 = f(x + 0.5 * h, y[k] + 0.5 * h * k1)
        k3 = f(x + 0.5 * h, y[k] + 0.5 * h * k2)
        k4 = f(x + h, y[k] + h * k3)
        y[k + 1] = y[k] + (1 / 6) * h * (k1 + 2 * k2 + 2 * k3 + k4)
        x += h
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
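# A quick sanity check of `runge_kutta` above on y' = y, y(0) = 1, whose exact
# solution is e^x (a sketch; the tolerance is illustrative):
_ys = runge_kutta(lambda x, y: y, ya=1.0, xa=0.0, x_end=1.0, h=0.01)
assert abs(_ys[-1] - np.e) < 1e-4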
| 369 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> from PIL import Image
>>> import torch
>>> from diffusers import DiffusionPipeline
>>> from diffusers.utils import export_to_gif, load_image
>>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
>>> repo = "openai/shap-e-img2img"
>>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
>>> pipe = pipe.to(device)
>>> guidance_scale = 3.0
>>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"
>>> image = load_image(image_url).convert("RGB")
>>> images = pipe(
... image,
... guidance_scale=guidance_scale,
... num_inference_steps=64,
... frame_size=256,
... ).images
>>> gif_path = export_to_gif(images[0], "corgi_3d.gif")
```
'''
@dataclass
class ShapEPipelineOutput(BaseOutput):
    images: Union[PIL.Image.Image, np.ndarray]


class ShapEImg2ImgPipeline(DiffusionPipeline):
    def __init__(
        self,
        prior: PriorTransformer,
        image_encoder: CLIPVisionModel,
        image_processor: CLIPImageProcessor,
        scheduler: HeunDiscreteScheduler,
        renderer: ShapERenderer,
    ):
        super().__init__()
        self.register_modules(
            prior=prior,
            image_encoder=image_encoder,
            image_processor=image_processor,
            scheduler=scheduler,
            renderer=renderer,
        )

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    @property
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.image_encoder, "_hf_hook"):
            return self.device
        for module in self.image_encoder.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance):
        if isinstance(image, list) and isinstance(image[0], torch.Tensor):
            image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0)

        if not isinstance(image, torch.Tensor):
            image = self.image_processor(image, return_tensors="pt").pixel_values[0].unsqueeze(0)

        image = image.to(dtype=self.image_encoder.dtype, device=device)

        image_embeds = self.image_encoder(image)["last_hidden_state"]
        image_embeds = image_embeds[:, 1:, :].contiguous()  # batch_size, dim, 256
        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds)
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            image_embeds = torch.cat([negative_image_embeds, image_embeds])

        return image_embeds

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image: Union[PIL.Image.Image, List[PIL.Image.Image]],
        num_images_per_prompt: int = 1,
        num_inference_steps: int = 25,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        guidance_scale: float = 4.0,
        frame_size: int = 64,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        elif isinstance(image, list) and isinstance(image[0], (torch.Tensor, PIL.Image.Image)):
            batch_size = len(image)
        else:
            raise ValueError(
                f"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(image)}"
            )

        device = self._execution_device

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0
        image_embeds = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance)

        # prior
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim

        latents = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0], num_embeddings, embedding_dim)

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            noise_pred = self.prior(
                scaled_model_input,
                timestep=t,
                proj_embedding=image_embeds,
            ).predicted_image_embedding

            # remove the variance
            noise_pred, _ = noise_pred.split(
                scaled_model_input.shape[2], dim=2
            )  # batch_size, num_embeddings, embedding_dim

            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)

            latents = self.scheduler.step(
                noise_pred,
                timestep=t,
                sample=latents,
            ).prev_sample

        if output_type == "latent":
            return ShapEPipelineOutput(images=latents)

        images = []
        for i, latent in enumerate(latents):
            image = self.renderer.decode(
                latent[None, :],
                device,
                size=frame_size,
                ray_batch_size=4096,
                n_coarse_samples=64,
                n_fine_samples=128,
            )
            images.append(image)

        images = torch.stack(images)

        if output_type not in ["np", "pil"]:
            raise ValueError(f"Only the output types `pil` and `np` are supported not output_type={output_type}")

        images = images.cpu().numpy()

        if output_type == "pil":
            images = [self.numpy_to_pil(image) for image in images]

        # Offload last model to CPU
        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
            self.final_offload_hook.offload()

        if not return_dict:
            return (images,)

        return ShapEPipelineOutput(images=images)
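# The guidance step inside `__call__` is the standard classifier-free guidance
# blend; with toy scalars (illustrative only) the conditional prediction is
# amplified away from the unconditional one:
#
#   uncond, cond, scale = -0.2, 0.4, 3.0
#   uncond + scale * (cond - uncond)  # == 1.6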
| 156 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_upernet": ["UperNetConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_upernet"] = [
        "UperNetForSemanticSegmentation",
        "UperNetPreTrainedModel",
    ]
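# The `_LazyModule` indirection at the bottom of this file defers the heavy
# torch import until an attribute is actually accessed; a sketch of the
# observable behaviour (assuming this file is the `upernet` package __init__):
#
#   from transformers.models import upernet   # cheap: nothing heavy loaded yet
#   upernet.UperNetConfig                      # triggers the real import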
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 262 |
"""simple docstring"""
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class NewModelConfig(BertConfig):
    model_type = "new-model"


if is_tf_available():

    class TFNewModel(TFBertModel):
        config_class = NewModelConfig
@require_tf
class _SCREAMING_SNAKE_CASE( unittest.TestCase ):
@slow
def _UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Tuple = '''bert-base-cased'''
__SCREAMING_SNAKE_CASE :int = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :List[str] = TFAutoModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
@slow
def _UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Tuple = '''bert-base-cased'''
__SCREAMING_SNAKE_CASE :List[str] = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :str = TFAutoModelForPreTraining.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
@slow
def _UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE :List[str] = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Optional[int] = TFAutoModelForCausalLM.from_pretrained(SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :Dict = TFAutoModelForCausalLM.from_pretrained(SCREAMING_SNAKE_CASE__ ,output_loading_info=SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
@slow
def _UpperCamelCase ( self ) -> int:
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE :List[str] = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Any = TFAutoModelWithLMHead.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
@slow
def _UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE :Union[str, Any] = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Tuple = TFAutoModelForMaskedLM.from_pretrained(SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :Tuple = TFAutoModelForMaskedLM.from_pretrained(SCREAMING_SNAKE_CASE__ ,output_loading_info=SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
@slow
def _UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE :Optional[Any] = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :str = TFAutoModelForSeqaSeqLM.from_pretrained(SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :Optional[int] = TFAutoModelForSeqaSeqLM.from_pretrained(SCREAMING_SNAKE_CASE__ ,output_loading_info=SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
@slow
def _UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
__SCREAMING_SNAKE_CASE :Optional[int] = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :List[str] = TFAutoModelForSequenceClassification.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
@slow
def _UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
__SCREAMING_SNAKE_CASE :Optional[int] = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Optional[Any] = TFAutoModelForQuestionAnswering.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
@slow
@require_tensorflow_probability
def _UpperCamelCase ( self ) -> int:
"""simple docstring"""
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
__SCREAMING_SNAKE_CASE :int = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Union[str, Any] = TFAutoModelForTableQuestionAnswering.from_pretrained(SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :Optional[int] = TFAutoModelForTableQuestionAnswering.from_pretrained(
SCREAMING_SNAKE_CASE__ ,output_loading_info=SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :int = TFAutoModelWithLMHead.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
self.assertEqual(model.num_parameters() ,1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=SCREAMING_SNAKE_CASE__ ) ,1_44_10 )
def _UpperCamelCase ( self ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Optional[Any] = TFAutoModelWithLMHead.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
self.assertEqual(model.num_parameters() ,1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=SCREAMING_SNAKE_CASE__ ) ,1_44_10 )
def _UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Optional[Any] = TFAutoModel.from_pretrained('''sgugger/funnel-random-tiny''' )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :str = copy.deepcopy(model.config )
__SCREAMING_SNAKE_CASE :List[str] = ['''FunnelBaseModel''']
__SCREAMING_SNAKE_CASE :int = TFAutoModel.from_config(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Union[str, Any] = TFAutoModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
try:
AutoConfig.register('''new-model''' ,SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Tuple = [
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__ ):
# Wrong config class will raise an error
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
auto_class.register(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
auto_class.register(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
auto_class.register(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
# Now that the config is registered, it can be used as any other config with the auto-API
__SCREAMING_SNAKE_CASE :Any = BertModelTester(self ).get_config()
__SCREAMING_SNAKE_CASE :Dict = NewModelConfig(**tiny_config.to_dict() )
__SCREAMING_SNAKE_CASE :Union[str, Any] = auto_class.from_config(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :str = auto_class.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
def _UpperCamelCase ( self ) -> int:
"""simple docstring"""
with self.assertRaisesRegex(
SCREAMING_SNAKE_CASE__ ,'''bert-base is not a local folder and is not a valid model identifier''' ):
__SCREAMING_SNAKE_CASE :int = TFAutoModel.from_pretrained('''bert-base''' )
def _UpperCamelCase ( self ) -> Any:
"""simple docstring"""
with self.assertRaisesRegex(
SCREAMING_SNAKE_CASE__ ,R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
__SCREAMING_SNAKE_CASE :Union[str, Any] = TFAutoModel.from_pretrained(SCREAMING_SNAKE_CASE__ ,revision='''aaaaaa''' )
def _UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
with self.assertRaisesRegex(
SCREAMING_SNAKE_CASE__ ,'''hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin''' ,):
__SCREAMING_SNAKE_CASE :Optional[Any] = TFAutoModel.from_pretrained('''hf-internal-testing/config-no-model''' )
def _UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
with self.assertRaisesRegex(SCREAMING_SNAKE_CASE__ ,'''Use `from_pt=True` to load this model''' ):
__SCREAMING_SNAKE_CASE :List[str] = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-bert-pt-only''' )
def _UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :int = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
with RequestCounter() as counter:
__SCREAMING_SNAKE_CASE :int = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(counter.get_request_count ,0 )
self.assertEqual(counter.head_request_count ,1 )
self.assertEqual(counter.other_request_count ,0 )
# With a sharded checkpoint
__SCREAMING_SNAKE_CASE :Dict = TFAutoModel.from_pretrained('''ArthurZ/tiny-random-bert-sharded''' )
with RequestCounter() as counter:
__SCREAMING_SNAKE_CASE :Optional[Any] = TFAutoModel.from_pretrained('''ArthurZ/tiny-random-bert-sharded''' )
self.assertEqual(counter.get_request_count ,0 )
self.assertEqual(counter.head_request_count ,1 )
self.assertEqual(counter.other_request_count ,0 ) | 191 | 0 |
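# A minimal sketch of the Auto-API registration flow the tests above exercise,
# assuming a recent `transformers` release. `MyConfig` and the "my-model" type
# string are hypothetical; only the config half is shown, since registering a
# TF model additionally requires TensorFlow.
from transformers import AutoConfig, PretrainedConfig

class MyConfig(PretrainedConfig):
    model_type = "my-model"

AutoConfig.register("my-model", MyConfig)  # map the type string to the class
config = AutoConfig.for_model("my-model")  # the auto-API now resolves it
assert isinstance(config, MyConfig)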
from collections.abc import Callable
class lowercase :
'''simple docstring'''
def __init__( self , _snake_case = None ) -> None:
"""simple docstring"""
# Stores actual heap items.
UpperCAmelCase = []
# Stores indexes of each item for supporting updates and deletion.
UpperCAmelCase = {}
# Stores current size of heap.
UpperCAmelCase = 0
# Stores function used to evaluate the score of an item on which basis ordering
# will be done.
UpperCAmelCase = key or (lambda x : x)
def snake_case_ ( self , _snake_case ) -> int | None:
"""simple docstring"""
return int((i - 1) / 2 ) if i > 0 else None
def snake_case_ ( self , _snake_case ) -> int | None:
"""simple docstring"""
UpperCAmelCase = int(2 * i + 1 )
return left if 0 < left < self.size else None
def snake_case_ ( self , _snake_case ) -> int | None:
"""simple docstring"""
UpperCAmelCase = int(2 * i + 2 )
return right if 0 < right < self.size else None
def snake_case_ ( self , _snake_case , _snake_case ) -> None:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = (
self.pos_map[self.arr[j][0]],
self.pos_map[self.arr[i][0]],
)
# Then swap the items in the list.
UpperCAmelCase , UpperCAmelCase = self.arr[j], self.arr[i]
def snake_case_ ( self , _snake_case , _snake_case ) -> bool:
"""simple docstring"""
return self.arr[i][1] < self.arr[j][1]
def snake_case_ ( self , _snake_case ) -> int:
"""simple docstring"""
UpperCAmelCase = self._left(_snake_case )
UpperCAmelCase = self._right(_snake_case )
UpperCAmelCase = i
if left is not None and not self._cmp(_snake_case , _snake_case ):
UpperCAmelCase = left
if right is not None and not self._cmp(_snake_case , _snake_case ):
UpperCAmelCase = right
return valid_parent
def snake_case_ ( self , _snake_case ) -> None:
"""simple docstring"""
UpperCAmelCase = self._parent(_snake_case )
while parent is not None and not self._cmp(_snake_case , _snake_case ):
self._swap(_snake_case , _snake_case )
UpperCAmelCase , UpperCAmelCase = parent, self._parent(_snake_case )
def snake_case_ ( self , _snake_case ) -> None:
"""simple docstring"""
UpperCAmelCase = self._get_valid_parent(_snake_case )
while valid_parent != index:
self._swap(_snake_case , _snake_case )
UpperCAmelCase , UpperCAmelCase = valid_parent, self._get_valid_parent(_snake_case )
def snake_case_ ( self , _snake_case , _snake_case ) -> None:
"""simple docstring"""
if item not in self.pos_map:
return
UpperCAmelCase = self.pos_map[item]
UpperCAmelCase = [item, self.key(_snake_case )]
# Make sure heap is right in both up and down direction.
# Ideally only one of them will make any change.
self._heapify_up(_snake_case )
self._heapify_down(_snake_case )
def snake_case_ ( self , _snake_case ) -> None:
"""simple docstring"""
if item not in self.pos_map:
return
UpperCAmelCase = self.pos_map[item]
del self.pos_map[item]
UpperCAmelCase = self.arr[self.size - 1]
UpperCAmelCase = index
self.size -= 1
# Make sure heap is right in both up and down direction. Ideally only one
# of them will make any change- so no performance loss in calling both.
if self.size > index:
self._heapify_up(_snake_case )
self._heapify_down(_snake_case )
def snake_case_ ( self , _snake_case , _snake_case ) -> None:
"""simple docstring"""
UpperCAmelCase = len(self.arr )
if arr_len == self.size:
self.arr.append([item, self.key(_snake_case )] )
else:
UpperCAmelCase = [item, self.key(_snake_case )]
UpperCAmelCase = self.size
self.size += 1
self._heapify_up(self.size - 1 )
def snake_case_ ( self ) -> tuple | None:
"""simple docstring"""
return self.arr[0] if self.size else None
def snake_case_ ( self ) -> tuple | None:
"""simple docstring"""
UpperCAmelCase = self.get_top()
if top_item_tuple:
self.delete_item(top_item_tuple[0] )
return top_item_tuple
def _lowerCAmelCase ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
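# The class above keeps a position map so arbitrary items can be updated or
# deleted. As a lighter alternative, here is a minimal self-contained sketch of
# a heapq-based heap with lazy deletion, where stale entries are simply skipped
# on pop (`LazyHeap` is a hypothetical name, not part of the code above):
import heapq

class LazyHeap:
    def __init__(self):
        self._heap = []  # (key, item) pairs ordered by key
        self._live = {}  # item -> key of its most recent entry

    def push(self, item, key):
        self._live[item] = key
        heapq.heappush(self._heap, (key, item))

    def update(self, item, key):
        self.push(item, key)  # the old heap entry becomes stale

    def delete(self, item):
        self._live.pop(item, None)

    def pop(self):
        while self._heap:
            key, item = heapq.heappop(self._heap)
            if self._live.get(item) == key:  # skip stale or deleted entries
                del self._live[item]
                return item, key
        return None

h = LazyHeap()
h.push("a", 5)
h.push("b", 3)
h.update("a", 1)
assert h.pop() == ("a", 1)
assert h.pop() == ("b", 3)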
| 152 |
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}
__magic_name__ = {
"vocab_file": {
"abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
},
"emoji_file": {
"abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
},
}
__magic_name__ = {
"abeja/gpt-neox-japanese-2.7b": 2048,
}
def _lowerCAmelCase ( A__: List[Any] , A__: int ):
'''simple docstring'''
with open(A__ , '''r''' , encoding='''utf-8''' ) as f:
UpperCAmelCase = json.loads(f.read() )
UpperCAmelCase = collections.OrderedDict()
UpperCAmelCase = collections.OrderedDict()
UpperCAmelCase = collections.OrderedDict()
with open(A__ , '''r''' , encoding='''utf-8''' ) as f:
UpperCAmelCase = f.readlines()
UpperCAmelCase = [[t.rstrip('''\n''' )] if (t == ''',''' or ''',''' not in t) else t.rstrip('''\n''' ).split(''',''' ) for t in token]
for idx, b in enumerate(A__ ):
UpperCAmelCase = b
UpperCAmelCase = idx
for wd in b:
UpperCAmelCase = idx
return vocab, raw_vocab, ids_to_tokens, emoji
class lowercase ( A__ ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE = ["""input_ids""", """attention_mask"""]
def __init__( self , _snake_case , _snake_case , _snake_case="<|endoftext|>" , _snake_case="<|endoftext|>" , _snake_case="<|startoftext|>" , _snake_case="<|endoftext|>" , _snake_case=False , **_snake_case , ) -> Tuple:
"""simple docstring"""
super().__init__(
unk_token=_snake_case , pad_token=_snake_case , bos_token=_snake_case , eos_token=_snake_case , do_clean_text=_snake_case , **_snake_case , )
if not os.path.isfile(_snake_case ):
raise ValueError(
f"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"""
''' model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`''' )
if not os.path.isfile(_snake_case ):
raise ValueError(
f"""Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"""
''' pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`''' )
UpperCAmelCase = do_clean_text
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = load_vocab_and_emoji(_snake_case , _snake_case )
UpperCAmelCase = SubWordJapaneseTokenizer(
vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji )
@property
def snake_case_ ( self ) -> Any:
"""simple docstring"""
# self.vocab contains support for character fluctuation unique to Japanese, and has a large vocabulary
return len(self.raw_vocab )
def snake_case_ ( self ) -> Union[str, Any]:
"""simple docstring"""
return dict(self.raw_vocab , **self.added_tokens_encoder )
def snake_case_ ( self , _snake_case ) -> List[Any]:
"""simple docstring"""
return self.subword_tokenizer.tokenize(_snake_case , clean=self.do_clean_text )
def snake_case_ ( self , _snake_case ) -> Dict:
"""simple docstring"""
return self.vocab.get(_snake_case , self.vocab.get(self.unk_token ) )
def snake_case_ ( self , _snake_case ) -> Optional[int]:
"""simple docstring"""
return self.subword_tokenizer.convert_id_to_token(_snake_case )
def snake_case_ ( self , _snake_case ) -> List[str]:
"""simple docstring"""
UpperCAmelCase = ''''''.join(_snake_case ).strip()
return out_string
def snake_case_ ( self , _snake_case ) -> List[int]:
"""simple docstring"""
UpperCAmelCase = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(_snake_case , add_special_tokens=_snake_case ) + [self.eos_token_id] )
if len(_snake_case ) > self.model_max_length:
UpperCAmelCase = input_ids[-self.model_max_length :]
return input_ids
def snake_case_ ( self , _snake_case , _snake_case = None ) -> Tuple[str]:
"""simple docstring"""
UpperCAmelCase = 0
if os.path.isdir(_snake_case ):
UpperCAmelCase = os.path.join(
_snake_case , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
UpperCAmelCase = os.path.join(
_snake_case , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''emoji_file'''] )
else:
UpperCAmelCase = (
(filename_prefix + '''-''' if filename_prefix else '''''') + save_directory + VOCAB_FILES_NAMES['''vocab_file''']
)
UpperCAmelCase = (
(filename_prefix + '''-''' if filename_prefix else '''''') + save_directory + VOCAB_FILES_NAMES['''emoji_file''']
)
with open(_snake_case , '''w''' , encoding='''utf-8''' ) as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
''' Please check that the vocabulary is not corrupted!''' )
UpperCAmelCase = token_index
writer.write(''','''.join(_snake_case ) + '''\n''' )
index += 1
with open(_snake_case , '''w''' , encoding='''utf-8''' ) as writer:
json.dump(self.emoji , _snake_case )
return vocab_file, emoji_file
class lowercase ( A__ ):
'''simple docstring'''
def __init__( self , _snake_case , _snake_case , _snake_case ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = vocab # same as swe
UpperCAmelCase = ids_to_tokens # same as bpe
UpperCAmelCase = emoji
UpperCAmelCase = np.max([len(_snake_case ) for w in self.vocab.keys()] )
UpperCAmelCase = re.compile(R'''(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)''' )
UpperCAmelCase = re.compile(R'''[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*''' )
UpperCAmelCase = re.compile(R'''[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}''' )
UpperCAmelCase = re.compile(
R'''([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*''' )
UpperCAmelCase = re.compile(
R'''(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*''' )
UpperCAmelCase = re.compile(
R'''((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*''' )
UpperCAmelCase = '''─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿'''
UpperCAmelCase = '''▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟'''
UpperCAmelCase = str.maketrans({k: '''<BLOCK>''' for k in keisen + blocks} )
def __len__( self ) -> Dict:
"""simple docstring"""
return len(self.ids_to_tokens )
def snake_case_ ( self , _snake_case ) -> str:
"""simple docstring"""
UpperCAmelCase = self.content_repattera.sub('''<URL>''' , _snake_case )
UpperCAmelCase = self.content_repattera.sub('''<EMAIL>''' , _snake_case )
UpperCAmelCase = self.content_repattera.sub('''<TEL>''' , _snake_case )
UpperCAmelCase = self.content_repattera.sub('''<DATE>''' , _snake_case )
UpperCAmelCase = self.content_repattera.sub('''<DATE>''' , _snake_case )
UpperCAmelCase = self.content_repattera.sub('''<PRICE>''' , _snake_case )
UpperCAmelCase = content.translate(self.content_transa )
while "<BLOCK><BLOCK>" in content:
UpperCAmelCase = content.replace('''<BLOCK><BLOCK>''' , '''<BLOCK>''' )
return content
def snake_case_ ( self , _snake_case , _snake_case=False ) -> str:
"""simple docstring"""
UpperCAmelCase = text.replace(''' ''' , '''<SP>''' )
UpperCAmelCase = text.replace(''' ''' , '''<SP>''' )
UpperCAmelCase = text.replace('''\r\n''' , '''<BR>''' )
UpperCAmelCase = text.replace('''\n''' , '''<BR>''' )
UpperCAmelCase = text.replace('''\r''' , '''<BR>''' )
UpperCAmelCase = text.replace('''\t''' , '''<TAB>''' )
UpperCAmelCase = text.replace('''—''' , '''ー''' )
UpperCAmelCase = text.replace('''−''' , '''ー''' )
for k, v in self.emoji["emoji"].items():
if k in text:
UpperCAmelCase = text.replace(_snake_case , _snake_case )
if clean:
UpperCAmelCase = self.clean_text(_snake_case )
def check_simbol(x ):
e = x.encode()
if len(x ) == 1 and len(e ) == 2:
c = (int(e[0] ) << 8) + int(e[1] )
if (
(c >= 0XC2A1 and c <= 0XC2BF)
or (c >= 0XC780 and c <= 0XC783)
or (c >= 0XCAB9 and c <= 0XCBBF)
or (c >= 0XCC80 and c <= 0XCDA2)
):
return True
return False
def checkuae(x ):
e = x.encode()
if len(x ) == 1 and len(e ) == 3:
c = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
if c >= 0XE28080 and c <= 0XE2B07F:
return True
return False
UpperCAmelCase = 0
UpperCAmelCase = []
while pos < len(_snake_case ):
UpperCAmelCase = min(len(_snake_case ) , pos + self.maxlen + 1 ) if text[pos] == '''<''' else pos + 3
UpperCAmelCase = [] # (token_id, token, pos)
for e in range(_snake_case , _snake_case , -1 ):
UpperCAmelCase = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(_snake_case ) > 2:
UpperCAmelCase = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e) )
if len(_snake_case ) > 0:
# the smallest token_id is adopted
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = sorted(_snake_case , key=lambda x : x[0] )[0]
result.append(_snake_case )
UpperCAmelCase = e
else:
UpperCAmelCase = pos + 1
UpperCAmelCase = text[pos:end]
if check_simbol(_snake_case ):
result.append('''<KIGOU>''' )
elif checkuae(_snake_case ):
result.append('''<U2000U2BFF>''' )
else:
for i in wd.encode('''utf-8''' ):
result.append('''<|byte%d|>''' % i )
UpperCAmelCase = end
return result
def snake_case_ ( self , _snake_case , _snake_case="\n" ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = []
UpperCAmelCase = []
UpperCAmelCase = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
if len(_snake_case ) > 0:
words.append(bytearray(_snake_case ).decode('''utf-8''' , errors='''replace''' ) )
UpperCAmelCase = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji['''emoji_inv'''][word] )
elif word == "<SP>":
words.append(''' ''' )
elif word == "<BR>":
words.append(_snake_case )
elif word == "<TAB>":
words.append('''\t''' )
elif word == "<BLOCK>":
words.append('''▀''' )
elif word == "<KIGOU>":
words.append('''ǀ''' )
elif word == "<U2000U2BFF>":
words.append('''‖''' )
else:
words.append(_snake_case )
if len(_snake_case ) > 0:
words.append(bytearray(_snake_case ).decode('''utf-8''' , errors='''replace''' ) )
UpperCAmelCase = ''''''.join(_snake_case )
return text
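# The tokenize() loop above is, at its core, a bounded longest-match scan over
# the vocabulary with a fallback when nothing matches. A simplified sketch with
# a toy vocab (the real code also collects every candidate match and prefers
# the smallest token id, and falls back to byte tokens rather than characters):
def greedy_tokenize(text, vocab, max_len):
    pos, out = 0, []
    while pos < len(text):
        for end in range(min(len(text), pos + max_len), pos, -1):
            piece = text[pos:end]
            if piece in vocab:
                out.append(piece)
                pos = end
                break
        else:  # no vocab entry starts at this position: emit one character
            out.append(text[pos])
            pos += 1
    return out

toy_vocab = {"to", "ken", "token", "izer"}
assert greedy_tokenize("tokenizer!", toy_vocab, 5) == ["token", "izer", "!"]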
| 152 | 1 |
from __future__ import annotations
def snake_case ( snake_case__ :int) -> bool:
_A = str(snake_case__)
return len(snake_case__) == 9 and set(snake_case__) == set("""123456789""")
def snake_case ( ) -> int | None:
for base_num in range(9_999 , 4_999 , -1):
_A = 100_002 * base_num
if is_9_pandigital(snake_case__):
return candidate
for base_num in range(333 , 99 , -1):
_A = 1_002_003 * base_num
if is_9_pandigital(snake_case__):
return candidate
return None
if __name__ == "__main__":
print(F'''{solution() = }''')
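# A quick sanity check of the multipliers used above: 100002 * n concatenates a
# 4-digit n with the 5-digit 2*n, and 1002003 * n concatenates a 3-digit n with
# 2*n and 3*n (the concatenated products of n with (1, 2) and (1, 2, 3)).
n = 9_327
assert 100_002 * n == int(str(n) + str(2 * n))
m = 192
assert 1_002_003 * m == int(str(m) + str(2 * m) + str(3 * m))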
| 180 | from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
__a : List[str] = Lock()
def UpperCAmelCase ( lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ):
"""simple docstring"""
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
for i in range(0 , 10 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(lowercase )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
__lowercase = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
__lowercase = min(lowercase , lowercase )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(lowercase )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
__lowercase = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
__lowercase = max(lowercase , lowercase )
# after all swaps are performed, send the values back to main
result_pipe[1].send(lowercase )
def UpperCAmelCase ( lowercase ):
"""simple docstring"""
__lowercase = []
__lowercase = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
__lowercase = Pipe()
__lowercase = Pipe()
process_array_.append(
Process(
target=lowercase , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
__lowercase = temp_rs
__lowercase = temp_rr
for i in range(1 , len(lowercase ) - 1 ):
__lowercase = Pipe()
__lowercase = Pipe()
process_array_.append(
Process(
target=lowercase , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
__lowercase = temp_rs
__lowercase = temp_rr
process_array_.append(
Process(
target=lowercase , args=(
len(lowercase ) - 1,
arr[len(lowercase ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(lowercase ) - 1],
) , ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(lowercase ) ):
__lowercase = result_pipe[p][0].recv()
process_array_[p].join()
return arr
def UpperCAmelCase ( ):
"""simple docstring"""
__lowercase = list(range(10 , 0 , -1 ) )
print('''Initial List''' )
print(*lowercase )
__lowercase = odd_even_transposition(lowercase )
print('''Sorted List\n''' )
print(*lowercase )
if __name__ == "__main__":
main() | 210 | 0 |
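# A sequential odd-even transposition sort for reference: the process-based
# version above runs the same alternating compare-swap phases, with one process
# per element and pipes carrying neighbor values.
def odd_even_sort(arr):
    arr = list(arr)
    n = len(arr)
    for phase in range(n):  # n phases guarantee a sorted result
        start = phase % 2  # even phases pair (0,1),(2,3)...; odd phases pair (1,2),(3,4)...
        for i in range(start, n - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr

assert odd_even_sort([5, 1, 4, 2, 3]) == [1, 2, 3, 4, 5]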
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__lowerCamelCase = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,unittest.TestCase ):
A__ : List[str] = XLMRobertaTokenizer
A__ : List[Any] = XLMRobertaTokenizerFast
A__ : List[str] = True
A__ : int = True
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
snake_case : List[str] = XLMRobertaTokenizer(_SCREAMING_SNAKE_CASE , keep_accents=_SCREAMING_SNAKE_CASE )
tokenizer.save_pretrained(self.tmpdirname )
def _SCREAMING_SNAKE_CASE (self : Tuple ) -> Any:
'''simple docstring'''
snake_case : Any = "<pad>"
snake_case : str = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE (self : Tuple ) -> Any:
'''simple docstring'''
snake_case : Optional[int] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-1] , "<mask>" )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , 10_02 )
def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 10_02 )
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] ) -> Dict:
'''simple docstring'''
snake_case : Any = XLMRobertaTokenizer(_SCREAMING_SNAKE_CASE , keep_accents=_SCREAMING_SNAKE_CASE )
snake_case : Dict = tokenizer.tokenize("This is a test" )
self.assertListEqual(_SCREAMING_SNAKE_CASE , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
snake_case : Any = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
_SCREAMING_SNAKE_CASE , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
snake_case : Tuple = tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE )
self.assertListEqual(
_SCREAMING_SNAKE_CASE , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
snake_case : List[Any] = tokenizer.convert_ids_to_tokens(_SCREAMING_SNAKE_CASE )
self.assertListEqual(
_SCREAMING_SNAKE_CASE , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
def _SCREAMING_SNAKE_CASE (self : Dict ) -> List[str]:
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
snake_case : Tuple = (self.rust_tokenizer_class, "hf-internal-testing/tiny-xlm-roberta", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
snake_case : Optional[int] = self.rust_tokenizer_class.from_pretrained(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
snake_case : Optional[int] = self.tokenizer_class.from_pretrained(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
snake_case : Dict = tempfile.mkdtemp()
snake_case : Any = tokenizer_r.save_pretrained(_SCREAMING_SNAKE_CASE )
snake_case : Union[str, Any] = tokenizer_p.save_pretrained(_SCREAMING_SNAKE_CASE )
# Checks it saves the same files, plus the tokenizer.json file for the fast one
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
snake_case : Optional[Any] = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f )
self.assertSequenceEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Checks everything loads correctly in the same way
snake_case : Optional[int] = tokenizer_r.from_pretrained(_SCREAMING_SNAKE_CASE )
snake_case : Optional[Any] = tokenizer_p.from_pretrained(_SCREAMING_SNAKE_CASE )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(_SCREAMING_SNAKE_CASE )
# Save tokenizer rust, legacy_format=True
snake_case : Union[str, Any] = tempfile.mkdtemp()
snake_case : Tuple = tokenizer_r.save_pretrained(_SCREAMING_SNAKE_CASE , legacy_format=_SCREAMING_SNAKE_CASE )
snake_case : Optional[Any] = tokenizer_p.save_pretrained(_SCREAMING_SNAKE_CASE )
# Checks it saves the same files
self.assertSequenceEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Checks everything loads correctly in the same way
snake_case : int = tokenizer_r.from_pretrained(_SCREAMING_SNAKE_CASE )
snake_case : List[Any] = tokenizer_p.from_pretrained(_SCREAMING_SNAKE_CASE )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
shutil.rmtree(_SCREAMING_SNAKE_CASE )
# Save tokenizer rust, legacy_format=False
snake_case : Union[str, Any] = tempfile.mkdtemp()
snake_case : Dict = tokenizer_r.save_pretrained(_SCREAMING_SNAKE_CASE , legacy_format=_SCREAMING_SNAKE_CASE )
snake_case : Union[str, Any] = tokenizer_p.save_pretrained(_SCREAMING_SNAKE_CASE )
# Checks it saved the tokenizer.json file
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
snake_case : List[Any] = tokenizer_r.from_pretrained(_SCREAMING_SNAKE_CASE )
snake_case : Dict = tokenizer_p.from_pretrained(_SCREAMING_SNAKE_CASE )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
shutil.rmtree(_SCREAMING_SNAKE_CASE )
@cached_property
def _SCREAMING_SNAKE_CASE (self : str ) -> Optional[Any]:
'''simple docstring'''
return XLMRobertaTokenizer.from_pretrained("xlm-roberta-base" )
def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(_SCREAMING_SNAKE_CASE , f.name )
snake_case : Any = XLMRobertaTokenizer(f.name , keep_accents=_SCREAMING_SNAKE_CASE )
snake_case : Any = pickle.dumps(_SCREAMING_SNAKE_CASE )
pickle.loads(_SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> Optional[int]:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
snake_case : Optional[int] = self.get_tokenizer()
snake_case : List[Any] = self.get_rust_tokenizer()
snake_case : str = "I was born in 92000, and this is falsé."
snake_case : List[Any] = tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
snake_case : Tuple = rust_tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case : Any = tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE )
snake_case : str = rust_tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case : str = self.get_rust_tokenizer()
snake_case : Union[str, Any] = tokenizer.encode(_SCREAMING_SNAKE_CASE )
snake_case : int = rust_tokenizer.encode(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def _SCREAMING_SNAKE_CASE (self : int ) -> Dict:
'''simple docstring'''
snake_case : Union[str, Any] = "Hello World!"
snake_case : int = [0, 3_53_78, 66_61, 38, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(_SCREAMING_SNAKE_CASE , self.big_tokenizer.encode(_SCREAMING_SNAKE_CASE ) )
@slow
def _SCREAMING_SNAKE_CASE (self : Tuple ) -> Optional[Any]:
'''simple docstring'''
snake_case : Optional[int] = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
snake_case : Tuple = [
0,
32_93,
83,
10,
45_52,
49_89,
79_86,
6_78,
10,
59_15,
1_11,
17_94_59,
12_48_50,
4,
60_44,
2_37,
12,
6,
5,
6,
4,
67_80,
7_05,
15,
13_88,
44,
3_78,
1_01_14,
7_11,
1_52,
20,
6,
5,
2_23_76,
6_42,
12_21,
1_51_90,
3_41_53,
4_50,
56_08,
9_59,
11_19,
5_77_02,
1_36,
1_86,
47,
10_98,
2_93_67,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
60_44,
2_37,
62_84,
5_09_01,
5_28,
31,
90,
34,
9_27,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(_SCREAMING_SNAKE_CASE , self.big_tokenizer.encode(_SCREAMING_SNAKE_CASE ) )
@slow
def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> Dict:
'''simple docstring'''
snake_case : List[Any] = {"input_ids": [[0, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [0, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_SCREAMING_SNAKE_CASE , model_name="xlm-roberta-base" , revision="d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3" , )
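# The pickling test above reduces to this stand-alone round trip. It assumes
# `transformers` and `sentencepiece` are installed and that the checkpoint can
# be downloaded from the Hub.
import pickle
from transformers import XLMRobertaTokenizer

tok = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
restored = pickle.loads(pickle.dumps(tok))
assert restored.tokenize("Hello World!") == tok.tokenize("Hello World!")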
| 363 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__lowerCamelCase = {
"""configuration_pix2struct""": [
"""PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Pix2StructConfig""",
"""Pix2StructTextConfig""",
"""Pix2StructVisionConfig""",
],
"""processing_pix2struct""": ["""Pix2StructProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = ["""Pix2StructImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
"""PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Pix2StructPreTrainedModel""",
"""Pix2StructForConditionalGeneration""",
"""Pix2StructVisionModel""",
"""Pix2StructTextModel""",
]
if TYPE_CHECKING:
from .configuration_pixastruct import (
PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
PixaStructConfig,
PixaStructTextConfig,
PixaStructVisionConfig,
)
from .processing_pixastruct import PixaStructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_pixastruct import PixaStructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pixastruct import (
PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
PixaStructForConditionalGeneration,
PixaStructPreTrainedModel,
PixaStructTextModel,
PixaStructVisionModel,
)
else:
import sys
__lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
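# A minimal stand-in for the lazy-import pattern above, written as the body of
# a package's __init__.py using PEP 562 module-level __getattr__. The submodule
# and class names are hypothetical; transformers' _LazyModule does the same
# bookkeeping from an _import_structure dict so heavy submodules are imported
# only on first attribute access.
import importlib

_import_structure = {
    "configuration_foo": ["FooConfig"],
    "modeling_foo": ["FooModel"],
}
_name_to_module = {
    name: module for module, names in _import_structure.items() for name in names
}

def __getattr__(name):  # called only for names not found in the module
    if name in _name_to_module:
        module = importlib.import_module("." + _name_to_module[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")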
| 10 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a__ : Optional[Any] = {
'configuration_time_series_transformer': [
'TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TimeSeriesTransformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Tuple = [
'TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TimeSeriesTransformerForPrediction',
'TimeSeriesTransformerModel',
'TimeSeriesTransformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
a__ : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 80 |
def lowerCamelCase__ ( __lowerCamelCase : Tuple , __lowerCamelCase : Union[str, Any] ):
__UpperCAmelCase : Tuple = [1]
for i in range(2 , __lowerCamelCase ):
factorials.append(factorials[-1] * i )
assert 0 <= k < factorials[-1] * n, "k out of bounds"
__UpperCAmelCase : Optional[Any] = []
__UpperCAmelCase : str = list(range(__lowerCamelCase ) )
# Find permutation
while factorials:
__UpperCAmelCase : Any = factorials.pop()
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = divmod(__lowerCamelCase , __lowerCamelCase )
permutation.append(elements[number] )
elements.remove(elements[number] )
permutation.append(elements[0] )
return permutation
if __name__ == "__main__":
import doctest
doctest.testmod()
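# A runnable restatement of the routine above (the factorial number system /
# Lehmer code construction), cross-checked against itertools.permutations:
from itertools import permutations

def kth_permutation(k, n):
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"
    permutation, elements = [], list(range(n))
    while factorials:
        number, k = divmod(k, factorials.pop())
        permutation.append(elements.pop(number))
    permutation.append(elements[0])
    return permutation

assert kth_permutation(10, 4) == list(list(permutations(range(4)))[10])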
| 114 | 0 |
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class UpperCAmelCase_ ( __lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : int = 0
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : float = 3.0
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self ) -> int:
# If no defaults are changed, `to_kwargs` returns an empty dict.
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'''a''': 2} )
self.assertDictEqual(MockClass(a=2 , b=UpperCamelCase_ ).to_kwargs() , {'''a''': 2, '''b''': True} )
self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {'''a''': 2, '''c''': 2.25} )
@require_cuda
def __lowercase ( self ) -> Union[str, Any]:
# Non-default handler kwargs should be applied to the GradScaler created by the Accelerator.
_a : Any = GradScalerKwargs(init_scale=1_0_2_4 , growth_factor=2 )
AcceleratorState._reset_state()
_a : int = Accelerator(mixed_precision='''fp16''' , kwargs_handlers=[scaler_handler] )
print(accelerator.use_fpaa )
_a : int = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 1024.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 2_0_0_0 )
self.assertEqual(scaler._enabled , UpperCamelCase_ )
@require_multi_gpu
def __lowercase ( self ) -> Any:
_a : Union[str, Any] = ['''torchrun''', F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
execute_subprocess_async(UpperCamelCase_ , env=os.environ.copy() )
if __name__ == "__main__":
a__ = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
a__ = Accelerator(kwargs_handlers=[ddp_scaler])
a__ = torch.nn.Linear(100, 200)
a__ = accelerator.prepare(model)
# Check the values changed in kwargs
a__ = ''''''
a__ = model.bucket_bytes_cap // (1024 * 1024)
if observed_bucket_cap_map != 15:
error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
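# The behavior pinned down by the first test above, restated independently of
# accelerate: a KwargsHandler-style dataclass whose to_kwargs() returns only
# the fields that differ from their defaults (MockHandler is a hypothetical
# stand-in for the MockClass used in the tests).
from dataclasses import dataclass, fields

@dataclass
class MockHandler:
    a: int = 0
    b: bool = False
    c: float = 3.0

    def to_kwargs(self):
        default = MockHandler()
        return {
            f.name: getattr(self, f.name)
            for f in fields(self)
            if getattr(self, f.name) != getattr(default, f.name)
        }

assert MockHandler().to_kwargs() == {}
assert MockHandler(a=2, b=True).to_kwargs() == {"a": 2, "b": True}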
| 356 |
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
a__ = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'''text-classification''',
'''language-modeling''',
'''summarization''',
'''token-classification''',
'''question-answering''',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
a__ = logging.getLogger()
def __UpperCAmelCase ( ) -> Optional[int]:
"""simple docstring"""
_a : Any = argparse.ArgumentParser()
parser.add_argument('''-f''' )
_a : Dict = parser.parse_args()
return args.f
def __UpperCAmelCase ( __a : Optional[int] ,__a : List[str]="eval" ) -> Any:
"""simple docstring"""
_a : Any = os.path.join(__a ,F"""{split}_results.json""" )
if os.path.exists(__a ):
with open(__a ,'''r''' ) as f:
return json.load(__a )
raise ValueError(F"""can't find {path}""" )
a__ = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
def __lowercase ( self ) -> str:
_a : Any = self.get_auto_remove_tmp_dir()
_a : Optional[Any] = F"""
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
with patch.object(_a , '''argv''' , _a ):
run_flax_glue.main()
_a : Any = get_results(_a )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
@slow
def __lowercase ( self ) -> Dict:
_a : Tuple = self.get_auto_remove_tmp_dir()
_a : Tuple = F"""
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(_a , '''argv''' , _a ):
run_clm_flax.main()
_a : List[str] = get_results(_a )
self.assertLess(result['''eval_perplexity'''] , 1_0_0 )
@slow
def __lowercase ( self ) -> Optional[int]:
_a : str = self.get_auto_remove_tmp_dir()
_a : Optional[int] = F"""
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
""".split()
with patch.object(_a , '''argv''' , _a ):
run_summarization_flax.main()
_a : Optional[int] = get_results(_a , split='''test''' )
self.assertGreaterEqual(result['''test_rouge1'''] , 1_0 )
self.assertGreaterEqual(result['''test_rouge2'''] , 2 )
self.assertGreaterEqual(result['''test_rougeL'''] , 7 )
self.assertGreaterEqual(result['''test_rougeLsum'''] , 7 )
@slow
def __lowercase ( self ) -> Tuple:
_a : List[str] = self.get_auto_remove_tmp_dir()
_a : List[Any] = F"""
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
""".split()
with patch.object(_a , '''argv''' , _a ):
run_mlm_flax.main()
_a : List[Any] = get_results(_a )
self.assertLess(result['''eval_perplexity'''] , 4_2 )
@slow
def __lowercase ( self ) -> Dict:
_a : Optional[Any] = self.get_auto_remove_tmp_dir()
_a : int = F"""
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(_a , '''argv''' , _a ):
run_ta_mlm_flax.main()
_a : List[Any] = get_results(_a )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.42 )
@slow
def __lowercase ( self ) -> Optional[Any]:
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
_a : Any = 7 if get_gpu_count() > 1 else 2
_a : List[Any] = self.get_auto_remove_tmp_dir()
_a : List[Any] = F"""
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
""".split()
with patch.object(_a , '''argv''' , _a ):
run_flax_ner.main()
_a : Dict = get_results(_a )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertGreaterEqual(result['''eval_f1'''] , 0.3 )
@slow
def __lowercase ( self ) -> Any:
_a : Optional[int] = self.get_auto_remove_tmp_dir()
_a : Union[str, Any] = F"""
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
""".split()
with patch.object(_a , '''argv''' , _a ):
run_qa.main()
_a : Any = get_results(_a )
self.assertGreaterEqual(result['''eval_f1'''] , 3_0 )
self.assertGreaterEqual(result['''eval_exact'''] , 3_0 )
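# The driver pattern every test above relies on, in isolation: patch sys.argv
# so that a script's argparse-based main() sees exactly the arguments the test
# builds. `my_script` and its main() are hypothetical.
import sys
from unittest.mock import patch

testargs = "my_script.py --output_dir /tmp/out --seed 42".split()
with patch.object(sys, "argv", testargs):
    assert sys.argv == testargs
    # my_script.main()  # would now parse the patched argv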
| 15 | 0 |
"""simple docstring"""
from __future__ import annotations
SCREAMING_SNAKE_CASE__ = "Muhammad Umer Farooq"
SCREAMING_SNAKE_CASE__ = "MIT"
SCREAMING_SNAKE_CASE__ = "1.0.0"
SCREAMING_SNAKE_CASE__ = "Muhammad Umer Farooq"
SCREAMING_SNAKE_CASE__ = "contact@muhammadumerfarooq.me"
SCREAMING_SNAKE_CASE__ = "Alpha"
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class lowerCAmelCase_ ( lowerCAmelCase ):
"""simple docstring"""
def __init__( self , lowerCAmelCase ):
"""simple docstring"""
super().__init__()
snake_case = []
snake_case = domain
def snake_case ( self , lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
if tag == "a":
# Check the list of defined attributes.
for name, value in attrs:
# If href is defined, and not empty nor # print it.
if name == "href" and value != "#" and value != "":
# If not already in urls.
if value not in self.urls:
snake_case = parse.urljoin(self.domain , lowerCAmelCase )
self.urls.append(lowerCAmelCase )
def lowerCAmelCase__ ( _UpperCamelCase : str ) -> str:
"""simple docstring"""
return ".".join(get_sub_domain_name(_UpperCamelCase ).split('.' )[-2:] )
def lowerCAmelCase__ ( _UpperCamelCase : str ) -> str:
"""simple docstring"""
return parse.urlparse(_UpperCamelCase ).netloc
def lowerCAmelCase__ ( _UpperCamelCase : str = "https://github.com" ) -> list[str]:
"""simple docstring"""
snake_case = get_domain_name(_UpperCamelCase )
# Initialize the parser
snake_case = Parser(_UpperCamelCase )
try:
# Open URL
snake_case = requests.get(_UpperCamelCase )
# pass the raw HTML to the parser to get links
parser.feed(r.text )
# Get links and loop through
snake_case = set()
for link in parser.urls:
# open URL.
# read = requests.get(link)
try:
snake_case = requests.get(_UpperCamelCase )
# Get the valid email.
snake_case = re.findall('[a-zA-Z0-9]+@' + domain , read.text )
# If not in list then append it.
for email in emails:
valid_emails.add(_UpperCamelCase )
except ValueError:
pass
except ValueError:
raise SystemExit(1 )
# Finally return a sorted list of email addresses with no duplicates.
return sorted(_UpperCamelCase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = emails_from_url("https://github.com")
print(f"""{len(emails)} emails found:""")
print("\n".join(sorted(emails)))
| 150 | """simple docstring"""
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {"vocab_file": "vocab.json"}
SCREAMING_SNAKE_CASE__ = {
"vocab_file": {
"mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
}
}
SCREAMING_SNAKE_CASE__ = {"mgp-str": 27}
class lowerCAmelCase_ ( lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase : Tuple = VOCAB_FILES_NAMES
_lowerCAmelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , lowerCAmelCase , lowerCAmelCase="[GO]" , lowerCAmelCase="[GO]" , lowerCAmelCase="[s]" , lowerCAmelCase="[GO]" , **lowerCAmelCase ):
"""simple docstring"""
super().__init__(
unk_token=lowerCAmelCase , bos_token=lowerCAmelCase , eos_token=lowerCAmelCase , pad_token=lowerCAmelCase , **lowerCAmelCase , )
with open(lowerCAmelCase , encoding='utf-8' ) as vocab_handle:
snake_case = json.load(lowerCAmelCase )
snake_case = {v: k for k, v in self.vocab.items()}
@property
def snake_case ( self ):
"""simple docstring"""
return len(self.vocab )
def snake_case ( self ):
"""simple docstring"""
return dict(self.vocab , **self.added_tokens_encoder )
def snake_case ( self , lowerCAmelCase ):
"""simple docstring"""
snake_case = []
for s in text:
char_tokens.extend(lowerCAmelCase )
return char_tokens
def snake_case ( self , lowerCAmelCase ):
"""simple docstring"""
return self.vocab.get(lowerCAmelCase , self.vocab.get(self.unk_token ) )
def snake_case ( self , lowerCAmelCase ):
"""simple docstring"""
return self.decoder.get(lowerCAmelCase )
def snake_case ( self , lowerCAmelCase , lowerCAmelCase = None ):
"""simple docstring"""
if not os.path.isdir(lowerCAmelCase ):
logger.error('Vocabulary path ({}) should be a directory'.format(lowerCAmelCase ) )
return
snake_case = os.path.join(
lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
with open(lowerCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.vocab , indent=2 , sort_keys=lowerCAmelCase , ensure_ascii=lowerCAmelCase ) + '\n' )
return (vocab_file,)
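# The character-level scheme above, reduced to its essentials with a toy vocab
# (the real class loads vocab.json and maps unknown characters to the unk token
# rather than dropping them):
toy_vocab = {c: i for i, c in enumerate("abcdefghijklmnopqrstuvwxyz0123456789")}
toy_decoder = {i: c for c, i in toy_vocab.items()}

def encode(text):
    return [toy_vocab[c] for c in text if c in toy_vocab]

def decode(ids):
    return "".join(toy_decoder[i] for i in ids)

assert decode(encode("mgp-str")) == "mgpstr"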
| 150 | 1 |
from collections import deque
from math import floor
from random import random
from time import time
class __lowerCAmelCase :
def __init__( self: Optional[Any] ):
lowercase :int = {}
def SCREAMING_SNAKE_CASE ( self: Optional[int] , _lowerCAmelCase: str , _lowerCAmelCase: Optional[Any] , _lowerCAmelCase: Any=1 ):
if self.graph.get(_lowerCAmelCase ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
lowercase :Dict = [[w, v]]
if not self.graph.get(_lowerCAmelCase ):
lowercase :List[Any] = []
def SCREAMING_SNAKE_CASE ( self: List[Any] ):
return list(self.graph )
def SCREAMING_SNAKE_CASE ( self: str , _lowerCAmelCase: Union[str, Any] , _lowerCAmelCase: str ):
if self.graph.get(_lowerCAmelCase ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(_lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self: Union[str, Any] , _lowerCAmelCase: Optional[Any]=-2 , _lowerCAmelCase: List[str]=-1 ):
if s == d:
return []
lowercase :Tuple = []
lowercase :List[str] = []
if s == -2:
lowercase :Any = list(self.graph )[0]
stack.append(_lowerCAmelCase )
visited.append(_lowerCAmelCase )
lowercase :str = s
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
lowercase :Any = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(_lowerCAmelCase )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
lowercase :Tuple = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(_lowerCAmelCase ) != 0:
lowercase :List[str] = stack[len(_lowerCAmelCase ) - 1]
else:
lowercase :Any = ss
# check if we have reached the starting point
if len(_lowerCAmelCase ) == 0:
return visited
def SCREAMING_SNAKE_CASE ( self: int , _lowerCAmelCase: Dict=-1 ):
if c == -1:
lowercase :List[str] = floor(random() * 1_00_00 ) + 10
for i in range(_lowerCAmelCase ):
# every vertex has max 100 edges
for _ in range(floor(random() * 1_02 ) + 1 ):
lowercase :int = floor(random() * c ) + 1
if n != i:
self.add_pair(_lowerCAmelCase , _lowerCAmelCase , 1 )
def SCREAMING_SNAKE_CASE ( self: str , _lowerCAmelCase: Tuple=-2 ):
lowercase :Optional[Any] = deque()
lowercase :Tuple = []
if s == -2:
lowercase :Tuple = list(self.graph )[0]
d.append(_lowerCAmelCase )
visited.append(_lowerCAmelCase )
while d:
lowercase :int = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def SCREAMING_SNAKE_CASE ( self: str , _lowerCAmelCase: List[str] ):
lowercase :str = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def SCREAMING_SNAKE_CASE ( self: List[Any] , _lowerCAmelCase: Optional[int] ):
return len(self.graph[u] )
def SCREAMING_SNAKE_CASE ( self: Any , _lowerCAmelCase: Optional[Any]=-2 ):
lowercase :List[str] = []
lowercase :Union[str, Any] = []
if s == -2:
lowercase :Optional[int] = list(self.graph )[0]
stack.append(_lowerCAmelCase )
visited.append(_lowerCAmelCase )
lowercase :Union[str, Any] = s
lowercase :Union[str, Any] = []
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
lowercase :Dict = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowercase :Optional[int] = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(_lowerCAmelCase ) != 0:
lowercase :int = stack[len(_lowerCAmelCase ) - 1]
else:
lowercase :Dict = ss
# check if we have reached the starting point
if len(_lowerCAmelCase ) == 0:
return sorted_nodes
def SCREAMING_SNAKE_CASE ( self: Optional[int] ):
lowercase :Optional[int] = []
lowercase :Tuple = []
lowercase :Tuple = list(self.graph )[0]
stack.append(_lowerCAmelCase )
visited.append(_lowerCAmelCase )
lowercase :str = -2
lowercase :List[Any] = []
lowercase :Dict = s
lowercase :Tuple = False
lowercase :List[Any] = set()
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
lowercase :Tuple = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowercase :Optional[int] = len(_lowerCAmelCase ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowercase :int = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowercase :Dict = True
if len(_lowerCAmelCase ) != 0:
lowercase :Optional[int] = stack[len(_lowerCAmelCase ) - 1]
else:
lowercase :Tuple = False
indirect_parents.append(_lowerCAmelCase )
lowercase :Dict = s
lowercase :List[str] = ss
# check if se have reached the starting point
if len(_lowerCAmelCase ) == 0:
return list(_lowerCAmelCase )
    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return False
    def dfs_time(self, s=-2, d=-1):
        begin = time()
        self.dfs(s, d)
        end = time()
        return end - begin
    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
class Graph:
    def __init__(self):
        self.graph = {}
    def add_pair(self, u, v, w=1):
        # check if the u exists
        if self.graph.get(u):
            # if there already is an edge
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            # if u does not exist
            self.graph[u] = [[w, v]]
        # add the other way
        if self.graph.get(v):
            # if there already is an edge
            if self.graph[v].count([w, u]) == 0:
                self.graph[v].append([w, u])
        else:
            # if v does not exist
            self.graph[v] = [[w, u]]
    def remove_pair(self, u, v):
        if self.graph.get(u):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_)
        # the other way round
        if self.graph.get(v):
            for _ in self.graph[v]:
                if _[1] == u:
                    self.graph[v].remove(_)
    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        # if the destination is reached, return the visit order
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
                else:
                    s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return visited
    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)
    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited
    def degree(self, u):
        return len(self.graph[u])
    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)
    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return False
    def all_nodes(self):
        return list(self.graph)
    def dfs_time(self, s=-2, d=-1):
        begin = time()
        self.dfs(s, d)
        end = time()
        return end - begin
    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
| 158 |
def solution():
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]
if __name__ == "__main__":
    print(solution())
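# Added sanity check: the same last-ten-digits answer follows from modular
# exponentiation, without materialising the full ~3000-digit sum.
if __name__ == "__main__":
    MOD = 10**10
    assert solution() == str(sum(pow(i, i, MOD) for i in range(1, 1001)) % MOD).zfill(10)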
| 158 | 1 |
from __future__ import annotations
import time
Path = list[tuple[int, int]]
grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0's are free paths whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]
delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
class Node:
    def __init__(self, pos_x, pos_y, goal_x, goal_y, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent
class BreadthFirstSearch:
    def __init__(self, start, goal):
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)
        self.node_queue = [self.start]
        self.reached = False
    def search(self) -> Path | None:
        while self.node_queue:
            current_node = self.node_queue.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            successors = self.get_successors(current_node)
            for node in successors:
                self.node_queue.append(node)
        if not self.reached:
            return [self.start.pos]
        return None
    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent))
        return successors
    def retrace_path(self, node) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalBreadthFirstSearch:
    def __init__(self, start, goal):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False
    def search(self) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(
                    current_fwd_node, current_bwd_node)
            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node
            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }
            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)
        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None
    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path:
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)
    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    path = bfs.search()
    bfs_time = time.time() - start_bfs_time
    print('Unidirectional BFS computation time : ', bfs_time)
    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time
    print('Bidirectional BFS computation time : ', bd_bfs_time)
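# Why the bidirectional variant is usually faster (added note): with branching
# factor b and solution depth d, one-sided BFS expands on the order of b**d
# nodes, while two frontiers that meet in the middle expand roughly 2 * b**(d / 2).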
| 205 |
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules['data_utils'] = data_utils
sys.modules['vocabulary'] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
):
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, 'rb') as fp:
            corpus = pickle.load(fp, encoding='latin1')
        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['pretrained_vocab_file']
        print(F'''Save vocabulary to {pytorch_vocab_dump_path}''')
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)
        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop('vocab', None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + '/' + CORPUS_NAME
        print(F'''Save dataset to {pytorch_dataset_dump_path}''')
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)
    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)
        print(F'''Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.''')
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(F'''Building PyTorch model from configuration: {config}''')
        model = TransfoXLLMHeadModel(config)
        model = load_tf_weights_in_transfo_xl(model, config, tf_path)
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(F'''Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}''')
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(F'''Save configuration file to {os.path.abspath(pytorch_config_dump_path)}''')
        with open(pytorch_config_dump_path, 'w', encoding='utf-8') as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--tf_checkpoint_path',
default='',
type=str,
help='An optional path to a TensorFlow checkpoint path to be converted.',
)
parser.add_argument(
'--transfo_xl_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--transfo_xl_dataset_file',
default='',
type=str,
help='An optional dataset file to be converted in a vocabulary.',
)
lowercase_ = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
| 205 | 1 |
'''simple docstring'''
import argparse
import copy
def generate_neighbours(path):
    dict_of_neighbours = {}
    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]])
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]])
    return dict_of_neighbours
def generate_first_solution(path, dict_of_neighbours):
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node
    first_solution = []
    visiting = start_node
    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]
        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node
    first_solution.append(end_node)
    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1
    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10000
    )
    return first_solution, distance_of_first_solution
def find_neighborhood(solution, dict_of_neighbours):
    neighborhood_of_solution = []
    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue
            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n
            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)
            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)
    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1
    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution
def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution
    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1
        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1
            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]
        if len(tabu_list) >= size:
            tabu_list.pop(0)
        count = count + 1
    return best_solution_ever, best_cost
def main(args=None):
    dict_of_neighbours = generate_neighbours(args.File)
    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours)
    best_sol, best_cost = tabu_search(
        first_solution, distance_of_first_solution, dict_of_neighbours, args.Iterations, args.Size, )
    print(F'''Best solution: {best_sol}, with total distance: {best_cost}.''')
if __name__ == "__main__":
A_ : Optional[int] = argparse.ArgumentParser(description="""Tabu Search""")
parser.add_argument(
"""-f""",
"""--File""",
type=str,
help="""Path to the file containing the data""",
required=True,
)
parser.add_argument(
"""-i""",
"""--Iterations""",
type=int,
help="""How many iterations the algorithm should perform""",
required=True,
)
parser.add_argument(
"""-s""", """--Size""", type=int, help="""Size of the tabu list""", required=True
)
# Pass the arguments to main method
main(parser.parse_args())
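# Added usage note: judging from the parsing in generate_neighbours, the input
# file is expected to hold one weighted edge per line, "<node_a> <node_b> <distance>".
# Hypothetical run: python tabu_search.py -f edges.txt -i 100 -s 5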
| 349 |
'''simple docstring'''
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main():
    parser = ArgumentParser("""Diffusers CLI tool""", usage="""diffusers-cli <command> [<args>]""")
    commands_parser = parser.add_subparsers(help="""diffusers-cli command helpers""")
    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)
    # Let's go
    args = parser.parse_args()
    if not hasattr(args, """func"""):
        parser.print_help()
        exit(1)
    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
| 349 | 1 |
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
UpperCAmelCase : List[str] = TypeVar("T")
class GraphAdjacencyList(Generic[T]):
    def __init__(self, directed: bool = True) -> None:
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed
    def add_edge(self, source_vertex: T, destination_vertex: T) -> GraphAdjacencyList[T]:
        if not self.directed:  # For undirected graphs
            # if both source vertex and destination vertex are both present in the
            # adjacency list, add destination vertex to source vertex list of adjacent
            # vertices and add source vertex to destination vertex list of adjacent
            # vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
            # if only source vertex is present in adjacency list, add destination vertex
            # to source vertex list of adjacent vertices, then create a new vertex with
            # destination vertex as key and assign a list containing the source vertex
            # as it's first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
            # if only destination vertex is present in adjacency list, add source vertex
            # to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the source vertex
            # as it's first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and assign a list
            # containing the destination vertex as it's first adjacent vertex also
            # create a new vertex with destination vertex as key and assign a list
            # containing the source vertex as it's first adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # if both source vertex and destination vertex are present in adjacency
            # list, add destination vertex to source vertex list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
            # if only source vertex is present in adjacency list, add destination
            # vertex to source vertex list of adjacent vertices and create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
            # if only destination vertex is present in adjacency list, create a new
            # vertex with source vertex as key and assign a list containing destination
            # vertex as first adjacent vertex
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and a list containing
            # destination vertex as it's first adjacent vertex. Then create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []
        return self
def __repr__( self ) -> str:
'''simple docstring'''
return pformat(self.adj_list )
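# A small usage sketch for the class above (added example; vertices are arbitrary).
# add_edge returns self, so calls can be chained:
if __name__ == "__main__":
    d_graph: GraphAdjacencyList[int] = GraphAdjacencyList()
    d_graph.add_edge(0, 1).add_edge(1, 2)
    print(d_graph)  # {0: [1], 1: [2], 2: []}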
| 252 |
'''simple docstring'''
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob(text: str) -> None:
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(""" """ + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())
    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.
    # print entropy
    print(f"""{round(-1 * my_fir_sum):.1f}""")
    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)
    # print second entropy
    print(f"""{round(-1 * my_sec_sum):.1f}""")
    # print the difference between them
    print(f"""{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}""")
def analyze_text(text: str) -> tuple[dict, dict]:
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1
    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings
def main() -> None:
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
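# Worked example of the first-order entropy term computed above (added note):
# for the two-character text "ab", p('a') = p('b') = 1/2, so
# H = -(0.5 * log2(0.5) + 0.5 * log2(0.5)) = 1.0 bit per character.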
| 37 | 0 |
import base64
def base85_encode(string: str) -> bytes:
    '''simple docstring'''
    # ascii85-encode the UTF-8 bytes of the input string
    return base64.a85encode(string.encode('utf-8'))
def base85_decode(a85encoded: bytes) -> str:
    '''simple docstring'''
    # decode ascii85 bytes back into a string
    return base64.a85decode(a85encoded).decode('utf-8')
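# Added round-trip sanity check for the two helpers above:
assert base85_decode(base85_encode('base 85')) == 'base 85'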
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 364 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_lxmert": ["LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LxmertConfig"],
"tokenization_lxmert": ["LxmertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["LxmertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_lxmert"] = [
"LxmertEncoder",
"LxmertForPreTraining",
"LxmertForQuestionAnswering",
"LxmertModel",
"LxmertPreTrainedModel",
"LxmertVisualFeatureEncoder",
"LxmertXLayer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_lxmert"] = [
"TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFLxmertForPreTraining",
"TFLxmertMainLayer",
"TFLxmertModel",
"TFLxmertPreTrainedModel",
"TFLxmertVisualFeatureEncoder",
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
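# How the lazy pattern above behaves (added note): the module body only builds
# _import_structure; a submodule such as modeling_lxmert is imported the first
# time one of its attributes (e.g. LxmertModel) is actually accessed.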
| 290 | 0 |
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.array:
    ar = F'{sampling_rate}'
    ac = '1'
    format_for_conversion = 'f32le'
    ffmpeg_command = [
        'ffmpeg',
        '-i',
        'pipe:0',
        '-ac',
        ac,
        '-ar',
        ar,
        '-f',
        format_for_conversion,
        '-hide_banner',
        '-loglevel',
        'quiet',
        'pipe:1',
    ]
    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError('ffmpeg was not found but is required to load audio files from filename') from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError('Malformed soundfile')
    return audio
def A_ ( A__ , A__ , A__ = "f32le" , ) -> int:
a__ : Optional[int] = F'{sampling_rate}'
a__ : Optional[Any] = '1'
if format_for_conversion == "s16le":
a__ : List[str] = 2
elif format_for_conversion == "f32le":
a__ : int = 4
else:
raise ValueError(F'Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`' )
a__ : List[Any] = platform.system()
if system == "Linux":
a__ : Any = 'alsa'
a__ : Optional[Any] = 'default'
elif system == "Darwin":
a__ : Union[str, Any] = 'avfoundation'
a__ : Any = ':0'
elif system == "Windows":
a__ : Union[str, Any] = 'dshow'
a__ : Optional[int] = 'default'
a__ : Dict = [
'ffmpeg',
'-f',
format_,
'-i',
input_,
'-ac',
ac,
'-ar',
ar,
'-f',
format_for_conversion,
'-fflags',
'nobuffer',
'-hide_banner',
'-loglevel',
'quiet',
'pipe:1',
]
a__ : List[Any] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
a__ : str = _ffmpeg_stream(A__ , A__ )
for item in iterator:
yield item
def ffmpeg_microphone_live(sampling_rate, chunk_length_s, stream_chunk_s=None, stride_length_s=None, format_for_conversion="f32le", ):
if stream_chunk_s is not None:
a__ : Union[str, Any] = stream_chunk_s
else:
a__ : Dict = chunk_length_s
a__ : Tuple = ffmpeg_microphone(A__ , A__ , format_for_conversion=A__ )
if format_for_conversion == "s16le":
a__ : str = np.intaa
a__ : Optional[int] = 2
elif format_for_conversion == "f32le":
a__ : Tuple = np.floataa
a__ : Optional[Any] = 4
else:
raise ValueError(F'Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`' )
if stride_length_s is None:
a__ : str = chunk_length_s / 6
a__ : int = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(A__ , (int, float) ):
a__ : Any = [stride_length_s, stride_length_s]
a__ : Optional[Any] = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
a__ : str = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
a__ : List[str] = datetime.datetime.now()
a__ : Tuple = datetime.timedelta(seconds=A__ )
for item in chunk_bytes_iter(A__ , A__ , stride=(stride_left, stride_right) , stream=A__ ):
# Put everything back in numpy scale
a__ : str = np.frombuffer(item['raw'] , dtype=A__ )
a__ : List[str] = (
item['stride'][0] // size_of_sample,
item['stride'][1] // size_of_sample,
)
a__ : Optional[int] = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
# We're late !! SKIP
continue
yield item
def chunk_bytes_iter(iterator, chunk_len, stride, stream=False):
a__ : Optional[Any] = B''
a__ , a__ : List[str] = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
F'Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}' )
a__ : Optional[Any] = 0
for raw in iterator:
acc += raw
if stream and len(A__ ) < chunk_len:
a__ : Any = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(A__ ) >= chunk_len:
# We are flushing the accumulator
a__ : int = (_stride_left, stride_right)
a__ : Any = {'raw': acc[:chunk_len], 'stride': stride}
if stream:
a__ : int = False
yield item
a__ : Union[str, Any] = stride_left
a__ : int = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(A__ ) > stride_left:
a__ : str = {'raw': acc, 'stride': (_stride_left, 0)}
if stream:
a__ : List[str] = False
yield item
def _ffmpeg_stream(ffmpeg_command, buflen):
a__ : Union[str, Any] = 2**24 # 16Mo
try:
with subprocess.Popen(A__ , stdout=subprocess.PIPE , bufsize=A__ ) as ffmpeg_process:
while True:
a__ : str = ffmpeg_process.stdout.read(A__ )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError('ffmpeg was not found but is required to stream audio files from filename' ) from error
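# Added note on the striding scheme above: each yielded chunk carries a
# (stride_left, stride_right) pair so that a downstream consumer can run
# inference on overlapping windows and then drop the overlapped edges,
# avoiding artifacts at the boundaries between consecutive chunks.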
| 99 |
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
    parser = argparse.ArgumentParser()
parser.add_argument('''--user''', type=str, default='''ubuntu''')
parser.add_argument('''--host''', type=str, default='''localhost''')
parser.add_argument('''--key_path''', type=str, default=None)
parser.add_argument('''--instance''', type=str, default='''V100:1''')
parser.add_argument('''--provider''', type=str, default='''cheapest''')
parser.add_argument('''--use_spot''', type=bool, default=False)
parser.add_argument('''--example''', type=str, default='''pytorch/text-generation/run_generation.py''')
    args, unknown = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError('''Cannot specify both BYO and on-demand cluster args''')
        cluster = rh.cluster(
name='''rh-cluster''', ips=[args.host], ssh_creds={'''ssh_user''': args.user, '''ssh_private_key''': args.key_path}
)
else:
        cluster = rh.cluster(
name='''rh-cluster''', instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
)
    example_dir = args.example.rsplit('''/''', 1)[0]
# Set up remote environment
cluster.install_packages(['''pip:./''']) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([F'''pip install -r transformers/examples/{example_dir}/requirements.txt'''])
cluster.run(['''pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117'''])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([F'''python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}'''])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
| 214 | 0 |
def UpperCAmelCase_(txt: str) -> list:
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt))
        if txt[a].isalpha()
    ]
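# Added example: every alphabetic position yields one variant with that letter
# uppercased, e.g. UpperCAmelCase_("abc") -> ['Abc', 'aBc', 'abC'].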
if __name__ == "__main__":
    __import__('doctest').testmod()
| 210 |
import random
from typing import Any
def fisher_yates_shuffle(data: list) -> list[Any]:
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data
if __name__ == "__main__":
lowerCamelCase__ : Union[str, Any] = [0, 1, 2, 3, 4, 5, 6, 7]
lowerCamelCase__ : Optional[int] = ['python', 'says', 'hello', '!']
print('Fisher-Yates Shuffle:')
print('List', integers, strings)
    print('FY Shuffle', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 210 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {'''configuration_unispeech''': ['''UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''UniSpeechConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_unispeech'''] = [
'''UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''UniSpeechForCTC''',
'''UniSpeechForPreTraining''',
'''UniSpeechForSequenceClassification''',
'''UniSpeechModel''',
'''UniSpeechPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 322 |
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 322 | 1 |
"""simple docstring"""
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
lowercase__ = """%20""".join(argv[1:]) if len(argv) > 1 else quote(str(input("""Search: """)))
print("""Googling.....""")
lowercase__ = F"https://www.google.com/search?q={query}&num=100"
    res = requests.get(
url,
headers={"""User-Agent""": str(UserAgent().random)},
)
try:
        link = (
BeautifulSoup(res.text, """html.parser""")
.find("""div""", attrs={"""class""": """yuRUbf"""})
.find("""a""")
.get("""href""")
)
except AttributeError:
        link = parse_qs(
BeautifulSoup(res.text, """html.parser""")
.find("""div""", attrs={"""class""": """kCrYT"""})
.find("""a""")
.get("""href""")
)["""url"""][0]
    webbrowser.open(link)
| 12 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"""configuration_mega""": ["""MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MegaConfig""", """MegaOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_mega"""] = [
"""MEGA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MegaForCausalLM""",
"""MegaForMaskedLM""",
"""MegaForMultipleChoice""",
"""MegaForQuestionAnswering""",
"""MegaForSequenceClassification""",
"""MegaForTokenClassification""",
"""MegaModel""",
"""MegaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 12 | 1 |
'''simple docstring'''
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)
BART_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/config.json''',
# See all BART models at https://huggingface.co/models?filter=bart
}
class BartConfig(PretrainedConfig):
_lowercase: Any = '''bart'''
_lowercase: Union[str, Any] = ['''past_key_values''']
_lowercase: str = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
    def __init__(self, vocab_size=50265, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, activation_function="gelu", d_model=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, classifier_dropout=0.0, scale_embedding=False, use_cache=True, num_labels=3, pad_token_id=1, bos_token_id=0, eos_token_id=2, is_encoder_decoder=True, decoder_start_token_id=2, forced_eos_token_id=2, **kwargs, ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            num_labels=num_labels, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs, )
        # ensure backward compatibility for BART CNN models
        if self.forced_bos_token_id is None and kwargs.get("""force_bos_token_to_be_generated""", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                """The config can simply be saved and uploaded again to be fixed.""")
class BartOnnxConfig(OnnxSeq2SeqConfigWithPast):
@property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
_lowerCAmelCase = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
_lowerCAmelCase = {0: """batch"""}
_lowerCAmelCase = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
else:
_lowerCAmelCase = {0: """batch""", 1: """decoder_sequence"""}
_lowerCAmelCase = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(__snake_case , direction="""inputs""" )
elif self.task == "causal-lm":
# TODO: figure this case out.
_lowerCAmelCase = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
_lowerCAmelCase , _lowerCAmelCase = self.num_layers
for i in range(__snake_case ):
_lowerCAmelCase = {0: """batch""", 2: """past_sequence + sequence"""}
_lowerCAmelCase = {0: """batch""", 2: """past_sequence + sequence"""}
else:
_lowerCAmelCase = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
("""decoder_input_ids""", {0: """batch""", 1: """decoder_sequence"""}),
("""decoder_attention_mask""", {0: """batch""", 1: """decoder_sequence"""}),
] )
return common_inputs
@property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
_lowerCAmelCase = super().outputs
else:
_lowerCAmelCase = super(__snake_case , self ).outputs
if self.use_past:
_lowerCAmelCase , _lowerCAmelCase = self.num_layers
for i in range(__snake_case ):
_lowerCAmelCase = {0: """batch""", 2: """past_sequence + sequence"""}
_lowerCAmelCase = {0: """batch""", 2: """past_sequence + sequence"""}
return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]:
_lowerCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
# Generate decoder inputs
_lowerCAmelCase = seq_length if not self.use_past else 1
_lowerCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
_lowerCAmelCase = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
_lowerCAmelCase = dict(**__snake_case , **__snake_case )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
_lowerCAmelCase , _lowerCAmelCase = common_inputs["""input_ids"""].shape
_lowerCAmelCase = common_inputs["""decoder_input_ids"""].shape[1]
_lowerCAmelCase , _lowerCAmelCase = self.num_attention_heads
_lowerCAmelCase = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_lowerCAmelCase = decoder_seq_length + 3
_lowerCAmelCase = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
_lowerCAmelCase = torch.cat(
[common_inputs["""decoder_attention_mask"""], torch.ones(__snake_case , __snake_case )] , dim=1 )
_lowerCAmelCase = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
_lowerCAmelCase , _lowerCAmelCase = self.num_layers
_lowerCAmelCase = min(__snake_case , __snake_case )
_lowerCAmelCase = max(__snake_case , __snake_case ) - min_num_layers
_lowerCAmelCase = """encoder""" if num_encoder_layers > num_decoder_layers else """decoder"""
for _ in range(__snake_case ):
common_inputs["past_key_values"].append(
(
torch.zeros(__snake_case ),
torch.zeros(__snake_case ),
torch.zeros(__snake_case ),
torch.zeros(__snake_case ),
) )
# TODO: test this.
_lowerCAmelCase = encoder_shape if remaining_side_name == """encoder""" else decoder_shape
for _ in range(__snake_case , __snake_case ):
common_inputs["past_key_values"].append((torch.zeros(__snake_case ), torch.zeros(__snake_case )) )
return common_inputs
    def _generate_dummy_inputs_for_causal_lm(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]:
_lowerCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
_lowerCAmelCase , _lowerCAmelCase = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
_lowerCAmelCase = seqlen + 2
_lowerCAmelCase , _lowerCAmelCase = self.num_layers
_lowerCAmelCase , _lowerCAmelCase = self.num_attention_heads
_lowerCAmelCase = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_lowerCAmelCase = common_inputs["""attention_mask"""].dtype
_lowerCAmelCase = torch.cat(
[common_inputs["""attention_mask"""], torch.ones(__snake_case , __snake_case , dtype=__snake_case )] , dim=1 )
_lowerCAmelCase = [
(torch.zeros(__snake_case ), torch.zeros(__snake_case )) for _ in range(__snake_case )
]
return common_inputs
    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]:
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
_lowerCAmelCase = compute_effective_axis_dimension(
__snake_case , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_lowerCAmelCase = tokenizer.num_special_tokens_to_add(__snake_case )
_lowerCAmelCase = compute_effective_axis_dimension(
__snake_case , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__snake_case )
# Generate dummy inputs according to compute batch and sequence
_lowerCAmelCase = [""" """.join([tokenizer.unk_token] ) * seq_length] * batch_size
_lowerCAmelCase = dict(tokenizer(__snake_case , return_tensors=__snake_case ) )
return common_inputs
    def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]:
if self.task in ["default", "seq2seq-lm"]:
_lowerCAmelCase = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
__snake_case , batch_size=__snake_case , seq_length=__snake_case , is_pair=__snake_case , framework=__snake_case )
elif self.task == "causal-lm":
_lowerCAmelCase = self._generate_dummy_inputs_for_causal_lm(
__snake_case , batch_size=__snake_case , seq_length=__snake_case , is_pair=__snake_case , framework=__snake_case )
else:
_lowerCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__snake_case , batch_size=__snake_case , seq_length=__snake_case , is_pair=__snake_case , framework=__snake_case )
return common_inputs
    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
if self.task in ["default", "seq2seq-lm"]:
_lowerCAmelCase = super()._flatten_past_key_values_(__snake_case , __snake_case , __snake_case , __snake_case )
else:
_lowerCAmelCase = super(__snake_case , self )._flatten_past_key_values_(
__snake_case , __snake_case , __snake_case , __snake_case )
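# Hypothetical usage sketch (added; treat the exact API as an assumption —
# in transformers this class is exposed as BartOnnxConfig):
# from transformers import BartConfig
# onnx_config = BartOnnxConfig(BartConfig(), task="seq2seq-lm")
# print(onnx_config.inputs)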
| 70 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'''configuration_xlnet''': ['''XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLNetConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_xlnet'''] = ['''XLNetTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_xlnet_fast'''] = ['''XLNetTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_xlnet'''] = [
'''XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLNetForMultipleChoice''',
'''XLNetForQuestionAnswering''',
'''XLNetForQuestionAnsweringSimple''',
'''XLNetForSequenceClassification''',
'''XLNetForTokenClassification''',
'''XLNetLMHeadModel''',
'''XLNetModel''',
'''XLNetPreTrainedModel''',
'''load_tf_weights_in_xlnet''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_xlnet'''] = [
'''TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLNetForMultipleChoice''',
'''TFXLNetForQuestionAnsweringSimple''',
'''TFXLNetForSequenceClassification''',
'''TFXLNetForTokenClassification''',
'''TFXLNetLMHeadModel''',
'''TFXLNetMainLayer''',
'''TFXLNetModel''',
'''TFXLNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 188 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyInpaintPipeline
    params = ['prompt', 'image_embeds', 'negative_image_embeds', 'image', 'mask_image']
_UpperCamelCase : Optional[int] = [
'prompt',
'negative_prompt',
'image_embeds',
'negative_image_embeds',
'image',
'mask_image',
]
_UpperCamelCase : Tuple = [
'generator',
'height',
'width',
'latents',
'guidance_scale',
'negative_prompt',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
_UpperCamelCase : Any = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base')
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim, transformerDimensions=self.text_embedder_hidden_size, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_hidden_layers=5, vocab_size=1_005)
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder
    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            'in_channels': 9,
            # out_channels is double in_channels because the model predicts both mean and variance
            'out_channels': 8,
            'addition_embed_type': 'text_image',
            'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
            'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
            'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
            'block_out_channels': (self.block_out_channels_0, self.block_out_channels_0 * 2),
            'layers_per_block': 1,
            'encoder_hid_dim': self.text_embedder_hidden_size,
            'encoder_hid_dim_type': 'text_image_proj',
            'cross_attention_dim': self.cross_attention_dim,
            'attention_head_dim': 4,
            'resnet_time_scale_shift': 'scale_shift',
            'class_embed_type': None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1_000, beta_schedule='linear', beta_start=0.00085, beta_end=0.012, clip_sample=False, set_alpha_to_one=False, steps_offset=1, prediction_type='epsilon', thresholding=False)
        components = {
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'unet': unet,
            'scheduler': scheduler,
            'movq': movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert('RGB').resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0  # assumed hole region; the original mask slice was lost in extraction
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'horse',
            'image': init_image,
            'mask_image': mask,
            'image_embeds': image_embeds,
            'negative_image_embeds': negative_image_embeds,
            'generator': generator,
            'height': 64,
            'width': 64,
            'num_inference_steps': 2,
            'guidance_scale': 4.0,
            'output_type': 'np',
        }
        return inputs
    def test_kandinsky_inpaint(self):
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False,
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        print(f"image.shape {image.shape}")
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786])
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy')
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png')
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0  # assumed hole region; the original mask slice was lost in extraction
        prompt = 'a hat'
        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-1-prior', torch_dtype=torch.float16)
        pipe_prior.to(torch_device)
        pipeline = KandinskyInpaintPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-1-inpaint', torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device='cpu').manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt='').to_tuple()
        output = pipeline(
            prompt, image=init_image, mask_image=mask, image_embeds=image_emb, negative_image_embeds=zero_image_emb, generator=generator, num_inference_steps=100, height=768, width=768, output_type='np')
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
| 369 |
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'})
    task_type: Optional[str] = field(
        default='NER', metadata={'help': 'Task type to fine tune in training (e.g. NER, POS, etc)'})
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'})
    use_fast: bool = field(default=False, metadata={'help': 'Set this flag to use fast tokenization.'})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'}, )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    data_dir: str = field(
        metadata={'help': 'The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task.'})
    labels: Optional[str] = field(
        default=None, metadata={'help': 'Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.'}, )
    max_seq_length: int = field(
        default=128, metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'})
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith('.json'):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
' --overwrite_output_dir to overcome.' )
    module = import_module('tasks')
    try:
        token_classification_task_clazz = getattr(module, model_args.task_type)
        token_classification_task: TokenClassificationTask = token_classification_task_clazz()
    except AttributeError:
        raise ValueError(
            f"""Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """
            f"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""")
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s', training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1), training_args.fp16, )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info('Training/evaluation parameters %s', training_args)
# Set seed
set_seed(training_args.seed )
    # Prepare CoNLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels)
    label_map: Dict[int, str] = dict(enumerate(labels))
    num_labels = len(labels)
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, id2label=label_map, label2id={label: i for i, label in enumerate(labels)}, cache_dir=model_args.cache_dir, )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast, )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool('.ckpt' in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, )
    # Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task, data_dir=data_args.data_dir, tokenizer=tokenizer, labels=labels, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train, )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task, data_dir=data_args.data_dir, tokenizer=tokenizer, labels=labels, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev, )
        if training_args.do_eval
        else None
    )
    def align_predictions(predictions, label_ids) -> Tuple[List[int], List[int]]:
        preds = np.argmax(predictions, axis=2)
        batch_size, seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size)]
        preds_list = [[] for _ in range(batch_size)]
        for i in range(batch_size):
            for j in range(seq_len):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])
        return preds_list, out_label_list

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions, p.label_ids)
        return {
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }
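    # A toy illustration of align_predictions (hand-worked values, not from a real run):
    # with label_map = {0: "O", 1: "B-PER"}, predictions whose argmax is [[0, 1, 1]]
    # and label_ids = [[-100, 1, 0]] (-100 is nn.CrossEntropyLoss().ignore_index),
    # the masked first position is dropped, giving
    # preds_list == [["B-PER", "B-PER"]] and out_label_list == [["B-PER", "O"]].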
    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=compute_metrics, data_collator=data_collator, )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***')
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, 'eval_results.txt')
        if trainer.is_world_process_zero():
            with open(output_eval_file, 'w') as writer:
                logger.info('***** Eval results *****')
                for key, value in result.items():
                    logger.info('  %s = %s', key, value)
                    writer.write('%s = %s\n' % (key, value))
            results.update(result)
    # Predict
    if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task, data_dir=data_args.data_dir, tokenizer=tokenizer, labels=labels, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.test, )
        predictions, label_ids, metrics = trainer.predict(test_dataset)
        preds_list, _ = align_predictions(predictions, label_ids)
        output_test_results_file = os.path.join(training_args.output_dir, 'test_results.txt')
        if trainer.is_world_process_zero():
            with open(output_test_results_file, 'w') as writer:
                for key, value in metrics.items():
                    logger.info('  %s = %s', key, value)
                    writer.write('%s = %s\n' % (key, value))
        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir, 'test_predictions.txt')
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file, 'w') as writer:
                with open(os.path.join(data_args.data_dir, 'test.txt'), 'r') as f:
                    token_classification_task.write_predictions_to_file(writer, f, preds_list)
return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 269 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]])
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]])
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 283 |
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # prior components
        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=embedder_projection_dim, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000))

        torch.manual_seed(0)
        prior = PriorTransformer(
            num_attention_heads=2, attention_head_dim=12, embedding_dim=embedder_projection_dim, num_layers=1, )

        torch.manual_seed(0)
        prior_scheduler = DDPMScheduler(
            variance_type="fixed_small_log", prediction_type="sample", num_train_timesteps=1000, clip_sample=True, clip_sample_range=5.0, beta_schedule="squaredcos_cap_v2", )

        # regular denoising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000))

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type="projection", projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=embedder_hidden_size, layers_per_block=1, upcast_attention=True, use_linear_projection=True, )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, prediction_type="v_prediction", set_alpha_to_one=False, steps_offset=1, )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)
@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_unclip(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy")
        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe("anime turle", generator=generator, output_type="np")
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        _ = pipe(
            "anime turtle", prior_num_inference_steps=2, num_inference_steps=2, output_type="np", )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 283 | 1 |
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize('dataset_size' ,[None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize('input_in_memory_max_size' ,['default', 0, 100 * 2**20, 900 * 2**20] )
def test_in_memory_max_size(dataset_size, input_in_memory_max_size, monkeypatch):
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, 'IN_MEMORY_MAX_SIZE', input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
assert result == expected
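# A minimal sketch of the helper under test (assuming the public signature
# is_small_dataset(dataset_size) -> bool, comparing against datasets.config.IN_MEMORY_MAX_SIZE):
# with IN_MEMORY_MAX_SIZE set to 500 * 2**20, a 400 MiB dataset counts as small
# (400 * 2**20 < 500 * 2**20) while a 600 MiB one does not, and a None size is never small.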
| 245 |
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector:
    """
    A basic vector class backed by a plain Python list of components.
    """

    def __init__(self, components=None):
        if components is None:
            components = []
        self.__components = list(components)

    def __len__(self):
        return len(self.__components)

    def __str__(self):
        return "(" + ",".join(map(str, self.__components)) + ")"
    def __add__(self, other):
        size = len(self)
        if size == len(other):
            result = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(result)
        else:
            raise Exception('must have the same size')

    def __sub__(self, other):
        size = len(self)
        if size == len(other):
            result = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(result)
        else:  # error case
            raise Exception('must have the same size')

    @overload
    def __mul__(self, other: float) -> "Vector":
        ...

    @overload
    def __mul__(self, other: "Vector") -> float:
        ...

    def __mul__(self, other):
        if isinstance(other, (float, int)):
            ans = [c * other for c in self.__components]
            return Vector(ans)
        elif isinstance(other, Vector) and len(self) == len(other):
            size = len(self)
            prods = [self.__components[i] * other.component(i) for i in range(size)]
            return sum(prods)
        else:  # error case
            raise Exception('invalid operand!')
    def copy(self):
        return Vector(self.__components)

    def component(self, i):
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        else:
            raise Exception('index out of range')

    def change_component(self, pos, value):
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value

    def euclidean_length(self):
        if len(self.__components) == 0:
            raise Exception('Vector is empty')
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares))

    def angle(self, other, deg=False):
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        else:
            return math.acos(num / den)
def zero_vector(dimension: int) -> Vector:
    # returns a zero vector of the given dimension
    assert isinstance(dimension, int)
    return Vector([0] * dimension)


def unit_basis_vector(dimension: int, pos: int) -> Vector:
    # returns a unit basis vector with a one at index 'pos' (0-indexed)
    assert isinstance(dimension, int) and (isinstance(pos, int))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)


def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
    # computes the axpy operation: scalar * x + y
    assert (
        isinstance(x, Vector)
        and isinstance(y, Vector)
        and (isinstance(scalar, (int, float)))
    )
    return x * scalar + y


def random_vector(n: int, a: int, b: int) -> Vector:
    # returns a random vector of size n with integer components between 'a' and 'b'
    random.seed(None)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)
class Matrix:
    """
    A simple matrix class stored as a list of row lists, with explicit width and height.
    """

    def __init__(self, matrix, w, h):
        self.__matrix = matrix
        self.__width = w
        self.__height = h

    def __str__(self):
        ans = ''
        for i in range(self.__height):
            ans += "|"
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ","
                else:
                    ans += str(self.__matrix[i][j]) + "|\n"
        return ans
    def __add__(self, other):
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] + other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception('matrix must have the same dimension!')

    def __sub__(self, other):
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] - other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception('matrices must have the same dimension!')
    @overload
    def __mul__(self, other: float) -> "Matrix":
        ...

    @overload
    def __mul__(self, other: Vector) -> Vector:
        ...

    def __mul__(self, other):
        if isinstance(other, Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    prods = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(prods))
                return ans
            else:
                raise Exception(
                    'vector must have the same size as the '
                    'number of columns of the matrix!')
        elif isinstance(other, (int, float)):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        return None
    def height(self):
        return self.__height

    def width(self):
        return self.__width

    def component(self, x, y):
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception('component: indices out of bounds')

    def change_component(self, x, y, value):
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception('change_component: indices out of bounds')
    def minor(self, x, y):
        if self.__height != self.__width:
            raise Exception('Matrix is not square')
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()

    def cofactor(self, x, y):
        if self.__height != self.__width:
            raise Exception('Matrix is not square')
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception('Indices out of bounds')

    def determinant(self):
        # Laplace expansion along the first row for n > 2
        if self.__height != self.__width:
            raise Exception('Matrix is not square')
        if self.__height < 1:
            raise Exception('Matrix has no element')
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactor_prods)
def square_zero_matrix(n: int) -> Matrix:
    # returns the square zero matrix of dimension n x n
    ans: list[list[float]] = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)


def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
    # returns a random matrix with integer components between 'a' and 'b'
    random.seed(None)
    matrix: list[list[float]] = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
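# A short usage sketch of the classes above (values hand-checked):
#   v = Vector([1, 2, 3]); w = Vector([4, 5, 6])
#   v * w == 32 (dot product), and v + w is the vector (5,7,9)
#   m = Matrix([[1, 2], [3, 4]], 2, 2)
#   m.determinant() == -2, and m * Vector([1, 1]) is the vector (3,7)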
| 245 | 1 |
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self):
        self.head = None

    def print_list(self):
        temp = self.head
        while temp is not None:
            print(temp.data, end=' ')
            temp = temp.next
        print()

    def push(self, new_data: Any):
        # insert a new node at the head of the list (O(1))
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    def swap_nodes(self, node_data_1, node_data_2):
        # swap the payloads of the nodes holding node_data_1 and node_data_2;
        # the links between nodes are left untouched
        if node_data_1 == node_data_2:
            return
        else:
            node_1 = self.head
            while node_1 is not None and node_1.data != node_data_1:
                node_1 = node_1.next
            node_2 = self.head
            while node_2 is not None and node_2.data != node_data_2:
                node_2 = node_2.next
            if node_1 is None or node_2 is None:
                return
            node_1.data, node_2.data = node_2.data, node_1.data
if __name__ == "__main__":
    ll = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print('''After swapping''')
ll.print_list()
| 214 |
'''simple docstring'''
def solution(limit: int = 1000000) -> int:
    """
    Returns the number of reduced proper fractions with denominator <= limit,
    i.e. the sum of Euler's totient phi(d) for d = 2..limit, computed with a sieve.
    """
    phi = [i - 1 for i in range(limit + 1)]
    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime, so update phi for all of its multiples
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1])
if __name__ == "__main__":
print(solution())
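# Sanity check of the sieve (hand-computed): for limit=10 the array holds
# phi[2..10] == [1, 2, 2, 4, 2, 6, 4, 6, 4], matching Euler's totient, so
# solution(10) == 31 reduced proper fractions with denominator <= 10.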
| 346 | 0 |
class Node:
    # BST node: holds a value plus left/right children
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        # standard BST insert: smaller values go left, larger go right,
        # equal values overwrite the current node's value
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val
def inorder(root, res):
    # recursive in-order traversal; appends values to res in sorted order
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    # build a BST from arr, then read it back in order
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # traverse BST in order
    res = []
    inorder(root, res)
    return res
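# Doctest-style check (hand-verified):
#   tree_sort([10, 1, 3, 2, 9, 14, 13]) == [1, 2, 3, 9, 10, 13, 14]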
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
| 361 |
import warnings
from diffusers import StableDiffusionImg2ImgPipeline  # noqa F401
warnings.warn(
'The `image_to_image.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionImg2ImgPipeline` instead.'
)
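# Migration sketch per the warning above (checkpoint id is illustrative):
#   from diffusers import StableDiffusionImg2ImgPipeline
#   pipe = StableDiffusionImg2ImgPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")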
| 39 | 0 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/unispeech-sat-base-100h-libri-ft": (
        "https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json"
    ),
    # See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class UniSpeechSatConfig(PretrainedConfig):
    model_type = "unispeech-sat"

    def __init__(
        self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, feat_quantizer_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_norm="group", feat_extract_activation="gelu", conv_dim=(512, 512, 512, 512, 512, 512, 512), conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, do_stable_layer_norm=False, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, num_codevectors_per_group=320, num_codevector_groups=2, contrastive_logits_temperature=0.1, num_negatives=100, codevector_dim=256, proj_codevector_dim=256, diversity_loss_weight=0.1, ctc_loss_reduction="mean", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, tdnn_dim=(512, 512, 512, 512, 1500), tdnn_kernel=(5, 3, 3, 1, 1), tdnn_dilation=(1, 2, 3, 1, 1), xvector_output_dim=512, pad_token_id=0, bos_token_id=1, eos_token_id=2, num_clusters=504, **kwargs, ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.num_clusters = num_clusters
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
                ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
                f' {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,'
                f' `len(config.conv_kernel) = {len(self.conv_kernel)}`.')

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim
    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
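# A minimal usage sketch (default values, not tied to any checkpoint):
#   config = UniSpeechSatConfig()
#   config.num_feat_extract_layers == 7
#   config.inputs_to_logits_ratio == 5 * 2 * 2 * 2 * 2 * 2 * 2 == 320,
# i.e. one encoder frame per 320 waveform samples (20 ms at 16 kHz).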
| 226 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 226 | 1 |
'''simple docstring'''
import heapq
def greedy_min_vertex_cover(graph: dict) -> set[int]:
    queue: list[list] = []

    # for each node and its adjacency list, add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if the node has no remaining adjacent edges, skip it
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem,
            # remove argmax from elem's adjacency list and update its rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
| 107 |
'''simple docstring'''
# Function to print upper half of diamond (pyramid)
def floyd(n):
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(' ', end='')
        for _ in range(0, i + 1):  # printing stars
            print('* ', end='')
        print()


def reverse_floyd(n):
    # lower half of the diamond: an inverted pyramid of stars
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print('* ', end='')
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(' ', end='')


def pretty_print(n):
    if n <= 0:
        print(' ... .... nothing printing :(')
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half
reverse_floyd(lowerCAmelCase_ ) # lower half
if __name__ == "__main__":
print(r'''| /\ | |- | |- |--| |\ /| |-''')
print(r'''|/ \| |- |_ |_ |__| | \/ | |_''')
    K = 1
    while K:
        user_number = int(input('''enter the number and , and see the magic : '''))
        print()
        pretty_print(user_number)
        K = int(input('''press 0 to exit... and 1 to continue...'''))
print('''Good Bye...''')
| 107 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
"tokenizer_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/bart-base": 1024,
"facebook/bart-large": 1024,
"facebook/bart-large-mnli": 1024,
"facebook/bart-large-cnn": 1024,
"facebook/bart-large-xsum": 1024,
"yjernite/bart_eli5": 1024,
}
class BartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = BartTokenizer
    def __init__(
        self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>",
        eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>",
        mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs,
    ):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token,
            eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token,
            pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets, **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. it includes the space before it
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs.")
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs.")
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
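# A small sketch of the special-token layout produced above (token ids illustrative):
#   build_inputs_with_special_tokens([5, 6])      -> [bos, 5, 6, eos]
#   build_inputs_with_special_tokens([5, 6], [7]) -> [bos, 5, 6, eos, eos, 7, eos]
# create_token_type_ids_from_sequences always returns zeros, since BART does not use token type ids.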
| 36 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_config(model_name):
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    conv_layer = "std_conv" if "bit" in model_name else False
    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer, num_labels=1_000, id2label=id2label, label2id=label2id, )
    return config
def rename_key(name):
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "head.fc" in name:
        name = name.replace("head.fc", "classifier.1")
    if name.startswith("norm"):
        name = "bit." + name
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name
    return name
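# Examples derived from the rules above:
#   rename_key("stem.conv.weight")       -> "bit.embedder.convolution.weight"
#   rename_key("blocks.0.conv1.weight")  -> "bit.encoder.layers.0.conv1.weight"
#   rename_key("head.fc.weight")         -> "classifier.1.weight"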
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    config = get_config(model_name)

    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val

    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = BitImageProcessor(
        do_resize=True, size={"shortest_edge": timm_transforms[0].size}, resample=pillow_resamplings[timm_transforms[0].interpolation.value], do_center_crop=True, crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]}, do_normalize=True, image_mean=timm_transforms[-1].mean.tolist(), image_std=timm_transforms[-1].std.tolist(), )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
    logits = outputs.logits

    print("Logits:", logits[0, :3])
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model {model_name} and processor to the hub")
        model.push_to_hub(f"ybelkada/{model_name}")
        processor.push_to_hub(f"ybelkada/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="resnetv2_50x1_bitm",
type=str,
help="Name of the BiT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model to the hub.",
)
    args = parser.parse_args()
    convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 36 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "bert_for_seq_generation": (
            "https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"bert_for_seq_generation": 512}


class BertGenerationTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    prefix_tokens: List[int] = []
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sep_token="<::::>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # Add extra_ids to the special token list
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def _tokenize(self, text: str) -> List[str]:
        """Take as input a string and return a list of strings (tokens) for words/sub-words."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
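# A minimal usage sketch (added for illustration; "spiece.model" is an assumed
# local SentencePiece file path, not shipped with this module):
#
#     tokenizer = BertGenerationTokenizer(vocab_file="spiece.model")
#     ids = tokenizer("hello world").input_ids
#     print(tokenizer.convert_ids_to_tokens(ids))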
| 125 |
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "[CLS]",
            "[SEP]",
            "want",
            "unwanted",
            "wa",
            "un",
            "running",
            ",",
            "low",
            "l",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        kwargs["lower_case"] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "<unk> UNwanted , running"
        output_text = "<unk> unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)

        tokens = tokenizer.tokenize("<unk> UNwanted , running")
        self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])
    def test_full_tokenizer_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["hello", "!", "how", "are", "you", "?"]
        )

    def test_full_tokenizer_no_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )
    def test_full_tokenizer_moses_numbers(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        tokens_out = [
"""Hello""",
"""(""",
"""bracket""",
""")""",
"""and""",
"""side""",
"""@-@""",
"""scrolled""",
"""[""",
"""and""",
"""]""",
"""Henry""",
"""'s""",
"""$""",
"""5""",
"""@,@""",
"""000""",
"""with""",
"""3""",
"""@.@""",
"""34""",
"""m""",
""".""",
"""What""",
"""'s""",
"""up""",
"""!""",
"""?""",
]
        self.assertListEqual(tokenizer.tokenize(text_in), tokens_out)

        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in)
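    # Note (added for illustration): the "@-@", "@,@" and "@.@" markers above
    # follow the WikiText-103 convention of escaping hyphens and digit
    # separators so that tokenization can round-trip, e.g. "5,000" becomes
    # ["5", "@,@", "000"] and converts back to "5,000".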
    def test_move_added_token(self):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer)

        tokenizer.add_tokens(["new1", "new2"])
        tokenizer.move_added_token("new1", 1)

        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer), original_len + 2)
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("new1"), [1])
        self.assertEqual(tokenizer.decode([1]), "new1")
| 125 | 1 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
    AutoTokenizer,
    Blip2Config,
    Blip2ForConditionalGeneration,
    Blip2Processor,
    Blip2VisionConfig,
    BlipImageProcessor,
    OPTConfig,
    T5Config,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") )
rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") )
rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") )
rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.weight''', f'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.bias''', f'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.weight''', f'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.bias''', f'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.qkv.weight''', f'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.weight''', f'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.bias''', f'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.layernorm.weight""") )
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.layernorm.bias""") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias
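# A standalone sketch of the q/v -> qkv bias rebuild above (added for
# illustration with toy sizes; BLIP-2's vision encoder carries no bias on the
# key projection, so the fused qkv bias is reconstructed as
# [q_bias, zeros_like(v_bias), v_bias]):
def _demo_qkv_bias():
    q_bias = torch.ones(4)
    v_bias = torch.full((4,), 2.0)
    qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias), v_bias))
    assert qkv_bias.shape == (12,)
    return qkv_bias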
def get_blip2_config(model_name, eos_token_id):
    image_size = 364 if "coco" in model_name else 224
    vision_config = Blip2VisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-2.7b", eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-6.7b", eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()

    config = Blip2Config(vision_config=vision_config, text_config=text_config)

    return config, image_size
@torch.no_grad()
def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Copy/paste/tweak the LAVIS BLIP-2 weights into the Transformers design."""
    tokenizer = (
        AutoTokenizer.from_pretrained("facebook/opt-2.7b")
        if "opt" in model_name
        else AutoTokenizer.from_pretrained("google/flan-t5-xl")
    )
    eos_token_id = tokenizer("\n", add_special_tokens=False).input_ids[0]
    config, image_size = get_blip2_config(model_name, eos_token_id=eos_token_id)

    hf_model = Blip2ForConditionalGeneration(config).eval()

    model_name_to_original = {
        "blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"),
        "blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"),
        "blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"),
        "blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"),
        "blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"),
        "blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"),
        "blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"),
    }

    name, model_type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    device = "cuda" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=model_type, is_eval=True, device=device
    )
    original_model.eval()
    print("Done!")

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "opt_proj" in key:
            key = key.replace("opt_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("opt"):
            key = key.replace("opt", "language")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
    assert len(missing_keys) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]

    image = load_demo_image()
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(device)
    input_ids = tokenizer(["\n"], return_tensors="pt").input_ids.to(device)

    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = Blip2Processor(image_processor=image_processor, tokenizer=tokenizer)
    pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(device)

    # make sure processor creates exact same pixel values
    assert torch.allclose(original_pixel_values, pixel_values)

    original_model.to(device)
    hf_model.to(device)
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [""]}).logits
            logits = hf_model(pixel_values, input_ids).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]}
            ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(pixel_values, input_ids, labels=labels).logits

    assert original_logits.shape == logits.shape
    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])

    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]], device=device
        )
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]], device=device
        )
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype), logits, atol=1e-2)
    print("Looks ok!")

    print("Generating a caption...")
    prompt = ""
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)

    original_outputs = original_model.generate({"image": original_pixel_values})
    outputs = hf_model.generate(
        pixel_values,
        input_ids,
        do_sample=False,
        num_beams=5,
        max_length=30,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.0,
        length_penalty=1.0,
        temperature=1,
    )
    print("Original generation:", original_outputs)
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:], skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f"nielsr/{model_name}")
        hf_model.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
"blip2-opt-2.7b",
"blip2-opt-6.7b",
"blip2-opt-2.7b-coco",
"blip2-opt-6.7b-coco",
"blip2-flan-t5-xl",
"blip2-flan-t5-xl-coco",
"blip2-flan-t5-xxl",
]
parser.add_argument(
"--model_name",
default="blip2-opt-2.7b",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
    args = parser.parse_args()

    convert_blip2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 26 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
UpperCamelCase_ = {"""configuration_dpt""": ["""DPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DPTConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""DPTFeatureExtractor"""]
UpperCamelCase_ = ["""DPTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_dpt"] = [
"""DPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DPTForDepthEstimation""",
"""DPTForSemanticSegmentation""",
"""DPTModel""",
"""DPTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
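# A minimal usage sketch (added for illustration): with the lazy structure
# above, "from transformers import DPTConfig" resolves immediately from
# configuration_dpt, while DPTForDepthEstimation is only materialized (and
# torch only imported) on first attribute access through the _LazyModule
# registered in sys.modules.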
| 309 | 0 |
from __future__ import annotations
class Matrix:
    def __init__(self, rows: list[list[int]]) -> None:
        error = TypeError(
            "Matrices must be formed from a list of zero or more lists containing at "
            "least one and the same number of values, each of which must be of type "
            "int or float."
        )
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []

    # MATRIX INFORMATION
    def columns(self) -> list[list[int]]:
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]

    @property
    def num_rows(self) -> int:
        return len(self.rows)

    @property
    def num_columns(self) -> int:
        return len(self.rows[0])

    @property
    def order(self) -> tuple[int, int]:
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self) -> bool:
        return self.order[0] == self.order[1]

    def identity(self) -> Matrix:
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)

    def determinant(self) -> int:
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0])
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0])
            )
        else:
            return sum(
                self.rows[0][column] * self.cofactors().rows[0][column]
                for column in range(self.num_columns)
            )

    def is_invertable(self) -> bool:
        return bool(self.determinant())

    def get_minor(self, row: int, column: int) -> int:
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()

    def get_cofactor(self, row: int, column: int) -> int:
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)

    def minors(self) -> Matrix:
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ]
        )

    def cofactors(self) -> Matrix:
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns)
                ]
                for row in range(self.minors().num_rows)
            ]
        )

    def adjugate(self) -> Matrix:
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)

    def inverse(self) -> Matrix:
        determinant = self.determinant()
        if not determinant:
            raise TypeError("Only matrices with a non-zero determinant have an inverse")
        return self.adjugate() * (1 / determinant)

    def __repr__(self) -> str:
        return str(self.rows)

    def __str__(self) -> str:
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join(str(self.rows[0])) + "]]"
        return (
            "["
            + "\n ".join(
                [
                    "[" + ". ".join([str(value) for value in row]) + ".]"
                    for row in self.rows
                ]
            )
            + "]"
        )

    # MATRIX MANIPULATION
    def add_row(self, row: list[int], position: int | None = None) -> None:
        type_error = TypeError("Row must be a list containing all ints and/or floats")
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError(
                "Row must be equal in length to the other rows in the matrix"
            )
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]

    def add_column(self, column: list[int], position: int | None = None) -> None:
        type_error = TypeError(
            "Column must be a list containing all ints and/or floats"
        )
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError(
                "Column must be equal in length to the other columns in the matrix"
            )
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]

    # MATRIX OPERATIONS
    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Matrix):
            return NotImplemented
        return self.rows == other.rows

    def __ne__(self, other: object) -> bool:
        return not self == other

    def __neg__(self) -> Matrix:
        return self * -1

    def __add__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Addition requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __sub__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Subtraction requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __mul__(self, other: Matrix | int | float) -> Matrix:
        if isinstance(other, (int, float)):
            return Matrix(
                [[int(element * other) for element in row] for row in self.rows]
            )
        elif isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    "The number of columns in the first matrix must "
                    "be equal to the number of rows in the second"
                )
            return Matrix(
                [
                    [Matrix.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ]
            )
        else:
            raise TypeError(
                "A Matrix can only be multiplied by an int, float, or another matrix"
            )

    def __pow__(self, other: int) -> Matrix:
        if not isinstance(other, int):
            raise TypeError("A Matrix can only be raised to the power of an int")
        if not self.is_square:
            raise ValueError("Only square matrices can be raised to a power")
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError(
                "Only invertable matrices can be raised to a negative power"
            )
        result = self
        for _ in range(other - 1):
            result *= self
        return result

    @classmethod
    def dot_product(cls, row: list[int], column: list[int]) -> int:
        return sum(row[i] * column[i] for i in range(len(row)))
if __name__ == "__main__":
import doctest
doctest.testmod()
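# A short usage sketch (added for illustration):
#
#     >>> m = Matrix([[1, 2], [3, 4]])
#     >>> m.determinant()
#     -2
#     >>> m * m.identity() == m
#     True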
| 359 |
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class NystromformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return NystromformerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NystromformerForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NystromformerForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = NystromformerForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class NystromformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            NystromformerModel,
            NystromformerForMaskedLM,
            NystromformerForMultipleChoice,
            NystromformerForQuestionAnswering,
            NystromformerForSequenceClassification,
            NystromformerForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NystromformerModel,
            "fill-mask": NystromformerForMaskedLM,
            "question-answering": NystromformerForQuestionAnswering,
            "text-classification": NystromformerForSequenceClassification,
            "token-classification": NystromformerForTokenClassification,
            "zero-shot": NystromformerForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_headmasking = False

    def setUp(self):
        self.model_tester = NystromformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NystromformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NystromformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class NystromformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = NystromformerModel.from_pretrained("uw-madison/nystromformer-512")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]]
        )

        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_masked_lm_end_to_end(self):
        sentence = "the [MASK] of Belgium is Brussels"

        tokenizer = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512")
        model = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512")

        encoding = tokenizer(sentence, return_tensors="pt")

        with torch.no_grad():
            token_logits = model(encoding.input_ids).logits

        prediction = token_logits[:, 2, :].argmax(-1)[0]

        self.assertEqual(tokenizer.decode(prediction), "capital")
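        # Note (added for illustration): token_logits[:, 2, :] above picks the
        # [MASK] position -- after tokenization the sequence starts [CLS],
        # "the", "[MASK]", ..., so the masked token sits at index 2.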
| 148 | 0 |
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_torch
class ZeroShotObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )

        examples = [
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "candidate_labels": ["cat", "remote", "couch"],
            }
        ]
        return object_detector, examples

    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector(examples[0], threshold=0.0)

        n = len(outputs)
        self.assertGreater(n, 0)
        self.assertEqual(
            outputs,
            [
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                }
                for i in range(n)
            ],
        )
    @require_tf
    @unittest.skip("Zero Shot Object Detection not implemented in TF")
    def test_small_model_tf(self):
        pass

    @require_torch
    def test_small_model_pt(self):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )

        outputs = object_detector(
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            candidate_labels=["cat", "remote", "couch"],
            threshold=0.64,
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'''score''': 0.7235, '''label''': '''cat''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.7218, '''label''': '''remote''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.7184, '''label''': '''couch''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.6748, '''label''': '''remote''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6656, '''label''': '''cat''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6614, '''label''': '''couch''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6456, '''label''': '''remote''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}},
{'''score''': 0.642, '''label''': '''remote''', '''box''': {'''xmin''': 67, '''ymin''': 274, '''xmax''': 93, '''ymax''': 297}},
{'''score''': 0.6419, '''label''': '''cat''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}},
] , )
        outputs = object_detector(
[
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
}
] , threshold=0.64 , )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
[
{'''score''': 0.7235, '''label''': '''cat''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.7218, '''label''': '''remote''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.7184, '''label''': '''couch''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.6748, '''label''': '''remote''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6656, '''label''': '''cat''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6614, '''label''': '''couch''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6456, '''label''': '''remote''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}},
{'''score''': 0.642, '''label''': '''remote''', '''box''': {'''xmin''': 67, '''ymin''': 274, '''xmax''': 93, '''ymax''': 297}},
{'''score''': 0.6419, '''label''': '''cat''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}},
]
] , )
    @require_torch
    @slow
    def test_large_model_pt(self):
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'''score''': 0.2868, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
{'''score''': 0.2537, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}},
{'''score''': 0.1474, '''label''': '''remote''', '''box''': {'''xmin''': 335, '''ymin''': 74, '''xmax''': 371, '''ymax''': 187}},
{'''score''': 0.1208, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 642, '''ymax''': 476}},
] , )
        outputs = object_detector(
[
{
'''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
},
{
'''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
},
] , )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
[
{'''score''': 0.2868, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
{'''score''': 0.2537, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}},
{'''score''': 0.1474, '''label''': '''remote''', '''box''': {'''xmin''': 335, '''ymin''': 74, '''xmax''': 371, '''ymax''': 187}},
{'''score''': 0.1208, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 642, '''ymax''': 476}},
],
[
{'''score''': 0.2868, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
{'''score''': 0.2537, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}},
{'''score''': 0.1474, '''label''': '''remote''', '''box''': {'''xmin''': 335, '''ymin''': 74, '''xmax''': 371, '''ymax''': 187}},
{'''score''': 0.1208, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 642, '''ymax''': 476}},
],
] , )
    @require_tf
    @unittest.skip("Zero Shot Object Detection not implemented in TF")
    def test_large_model_tf(self):
        pass

    @require_torch
    @slow
    def test_threshold(self):
        threshold = 0.2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            threshold=threshold,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'''score''': 0.2868, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
{'''score''': 0.2537, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}},
] , )
    @require_torch
    @slow
    def test_top_k(self):
        top_k = 2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            top_k=top_k,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'''score''': 0.2868, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
] , )
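    # Note (added for illustration): test_threshold and test_top_k truncate the
    # same ranked detections in two different ways -- by a score cutoff versus
    # by keeping a fixed number of highest-scoring boxes.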
| 45 |
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class VideoClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        example_video_filepath = hf_hub_download(
            repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset"
        )
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
        examples = [
            example_video_filepath,
            "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
        ]
        return video_classifier, examples

    def run_pipeline_test(self, video_classifier, examples):
        for example in examples:
            outputs = video_classifier(example)

            self.assertEqual(
                outputs,
                [
                    {"score": ANY(float), "label": ANY(str)},
                    {"score": ANY(float), "label": ANY(str)},
                ],
            )

    @require_torch
    def test_small_model_pt(self):
        small_model = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10}
        )
        video_classifier = pipeline(
            "video-classification", model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4
        )

        video_file_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
        outputs = video_classifier(video_file_path, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
        )

        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ],
            top_k=2,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        pass
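    # Note (added for illustration): frame_sampling_rate=4 in test_small_model_pt
    # subsamples the clip, taking every 4th frame up to the number of frames the
    # tiny VideoMAE model expects, before the 10x10 resize/crop is applied.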
| 20 | 0 |
from math import pow, sqrt
def validate(*values):
    """Return True if at least one value is given and all values are positive."""
    result = len(values) > 0 and all(value > 0.0 for value in values)
    return result


def effusion_ratio(molar_mass_1, molar_mass_2):
    return (
        round(sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass values must be greater than 0.")
    )
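# A worked example (added for illustration): by Graham's law the effusion-rate
# ratio of hydrogen (molar mass ~2.016 g/mol) to oxygen (~31.998 g/mol) is
# sqrt(31.998 / 2.016), i.e. hydrogen effuses roughly four times as fast:
#
#     >>> effusion_ratio(2.016, 31.998)   # approximately
#     3.983971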
def first_effusion_rate(effusion_rate, molar_mass_1, molar_mass_2):
    return (
        round(effusion_rate * sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0."
        )
    )


def second_effusion_rate(effusion_rate, molar_mass_1, molar_mass_2):
    return (
        round(effusion_rate / sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0."
        )
    )


def first_molar_mass(molar_mass, effusion_rate_1, effusion_rate_2):
    return (
        round(molar_mass / pow(effusion_rate_1 / effusion_rate_2, 2), 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0."
        )
    )


def second_molar_mass(molar_mass, effusion_rate_1, effusion_rate_2):
    return (
        round(pow(effusion_rate_1 / effusion_rate_2, 2) / molar_mass, 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0."
        )
    )
| 358 |
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
class ZeroShotImageClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification",
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])

        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(output),
            [
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5,
        )
    @require_tf
    def test_small_model_tf(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf"
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])

        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5,
        )
@slow
@require_torch
def __A ( self : Any ):
A_ = pipeline(
task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , )
# This is an image of 2 cats with remotes and no planes
A_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
A_ = image_classifier(UpperCAmelCase , candidate_labels=["cat", "plane", "remote"] )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
] , )
A_ = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [
[
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
],
]
* 5 , )
@slow
@require_tf
def __A ( self : Optional[Any] ):
A_ = pipeline(
task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , framework="tf" )
# This is an image of 2 cats with remotes and no planes
A_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
A_ = image_classifier(UpperCAmelCase , candidate_labels=["cat", "plane", "remote"] )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
] , )
A_ = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [
[
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
],
]
* 5 , ) | 329 | 0 |
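# A minimal usage sketch for the pipeline exercised by the tests above,
# assuming the standard `transformers.pipeline` API; the image path is a
# placeholder taken from the test fixtures.
from PIL import Image
from transformers import pipeline

classifier = pipeline(
    task="zero-shot-image-classification", model="openai/clip-vit-base-patch32"
)
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
# Returns a list of {"score": float, "label": str} dicts, sorted by score.
outputs = classifier(image, candidate_labels=["cat", "plane", "remote"])
print(outputs)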
'''simple docstring'''
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
_SCREAMING_SNAKE_CASE : List[str] = re.compile(r"\b(a|an|the)\b", re.UNICODE)
_SCREAMING_SNAKE_CASE : List[str] = None
def UpperCamelCase_( ):
'''simple docstring'''
snake_case_ = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0." )
parser.add_argument("data_file" , metavar="data.json" , help="Input data JSON file." )
parser.add_argument("pred_file" , metavar="pred.json" , help="Model predictions." )
parser.add_argument(
"--out-file" , "-o" , metavar="eval.json" , help="Write accuracy metrics to file (default is stdout)." )
parser.add_argument(
"--na-prob-file" , "-n" , metavar="na_prob.json" , help="Model estimates of probability of no answer." )
parser.add_argument(
"--na-prob-thresh" , "-t" , type=snake_case , default=1.0 , help="Predict \"\" if no-answer probability exceeds this (default = 1.0)." , )
parser.add_argument(
"--out-image-dir" , "-p" , metavar="out_images" , default=snake_case , help="Save precision-recall curves to directory." )
parser.add_argument("--verbose" , "-v" , action="store_true" )
if len(sys.argv ) == 1:
parser.print_help()
sys.exit(1 )
return parser.parse_args()
def UpperCamelCase_( snake_case : int ):
'''simple docstring'''
snake_case_ = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
snake_case_ = bool(qa["answers"]["text"] )
return qid_to_has_ans
def UpperCamelCase_( snake_case : Optional[int] ):
'''simple docstring'''
def remove_articles(snake_case : Tuple ):
return ARTICLES_REGEX.sub(" " , snake_case )
def white_space_fix(snake_case : Dict ):
return " ".join(text.split() )
def remove_punc(snake_case : Optional[Any] ):
snake_case_ = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(snake_case : Dict ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(snake_case ) ) ) )
def UpperCamelCase_( snake_case : str ):
'''simple docstring'''
if not s:
return []
return normalize_answer(snake_case ).split()
def UpperCamelCase_( snake_case : Optional[Any] , snake_case : int ):
'''simple docstring'''
return int(normalize_answer(snake_case ) == normalize_answer(snake_case ) )
def UpperCamelCase_( snake_case : Union[str, Any] , snake_case : int ):
'''simple docstring'''
snake_case_ = get_tokens(snake_case )
snake_case_ = get_tokens(snake_case )
snake_case_ = collections.Counter(snake_case ) & collections.Counter(snake_case )
snake_case_ = sum(common.values() )
if len(snake_case ) == 0 or len(snake_case ) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks )
if num_same == 0:
return 0
snake_case_ = 1.0 * num_same / len(snake_case )
snake_case_ = 1.0 * num_same / len(snake_case )
snake_case_ = (2 * precision * recall) / (precision + recall)
return fa
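# Worked example (a sketch, not part of the evaluation script): normalization
# drops articles, so gold "the cat sat" -> ["cat", "sat"] and prediction
# "a cat sat down" -> ["cat", "sat", "down"]. The overlap is 2 tokens, giving
# precision = 2/3, recall = 2/2 = 1, and F1 = 2 * (2/3) * 1 / (2/3 + 1) = 0.8.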
def UpperCamelCase_( snake_case : str , snake_case : str ):
'''simple docstring'''
snake_case_ = {}
snake_case_ = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
snake_case_ = qa["id"]
snake_case_ = [t for t in qa["answers"]["text"] if normalize_answer(snake_case )]
if not gold_answers:
                    # For unanswerable questions, the only correct answer is the empty string
snake_case_ = [""]
if qid not in preds:
print(f'Missing prediction for {qid}' )
continue
snake_case_ = preds[qid]
# Take max over all gold answers
snake_case_ = max(compute_exact(snake_case , snake_case ) for a in gold_answers )
snake_case_ = max(compute_fa(snake_case , snake_case ) for a in gold_answers )
return exact_scores, fa_scores
def UpperCamelCase_( snake_case : List[str] , snake_case : Union[str, Any] , snake_case : Union[str, Any] , snake_case : int ):
'''simple docstring'''
snake_case_ = {}
for qid, s in scores.items():
snake_case_ = na_probs[qid] > na_prob_thresh
if pred_na:
snake_case_ = float(not qid_to_has_ans[qid] )
else:
snake_case_ = s
return new_scores
def UpperCamelCase_( snake_case : Tuple , snake_case : str , snake_case : Union[str, Any]=None ):
'''simple docstring'''
if not qid_list:
snake_case_ = len(snake_case )
return collections.OrderedDict(
[
("exact", 100.0 * sum(exact_scores.values() ) / total),
("f1", 100.0 * sum(fa_scores.values() ) / total),
("total", total),
] )
else:
snake_case_ = len(snake_case )
return collections.OrderedDict(
[
("exact", 100.0 * sum(exact_scores[k] for k in qid_list ) / total),
("f1", 100.0 * sum(fa_scores[k] for k in qid_list ) / total),
("total", total),
] )
def UpperCamelCase_( snake_case : Optional[int] , snake_case : Optional[Any] , snake_case : Optional[Any] ):
'''simple docstring'''
for k in new_eval:
snake_case_ = new_eval[k]
def UpperCamelCase_( snake_case : Optional[int] , snake_case : List[Any] , snake_case : Optional[int] , snake_case : str ):
'''simple docstring'''
plt.step(snake_case , snake_case , color="b" , alpha=0.2 , where="post" )
plt.fill_between(snake_case , snake_case , step="post" , alpha=0.2 , color="b" )
plt.xlabel("Recall" )
plt.ylabel("Precision" )
plt.xlim([0.0, 1.05] )
plt.ylim([0.0, 1.05] )
plt.title(snake_case )
plt.savefig(snake_case )
plt.clf()
def UpperCamelCase_( snake_case : Optional[Any] , snake_case : int , snake_case : Union[str, Any] , snake_case : int , snake_case : Optional[Any]=None , snake_case : Optional[Any]=None ):
'''simple docstring'''
snake_case_ = sorted(snake_case , key=lambda snake_case : na_probs[k] )
snake_case_ = 0.0
snake_case_ = 1.0
snake_case_ = 0.0
snake_case_ = [1.0]
snake_case_ = [0.0]
snake_case_ = 0.0
for i, qid in enumerate(snake_case ):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
snake_case_ = true_pos / float(i + 1 )
snake_case_ = true_pos / float(snake_case )
if i == len(snake_case ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(snake_case )
recalls.append(snake_case )
if out_image:
plot_pr_curve(snake_case , snake_case , snake_case , snake_case )
return {"ap": 100.0 * avg_prec}
def UpperCamelCase_( snake_case : Optional[int] , snake_case : Dict , snake_case : Optional[int] , snake_case : Dict , snake_case : List[str] , snake_case : Dict ):
'''simple docstring'''
if out_image_dir and not os.path.exists(snake_case ):
os.makedirs(snake_case )
snake_case_ = sum(1 for v in qid_to_has_ans.values() if v )
if num_true_pos == 0:
return
snake_case_ = make_precision_recall_eval(
snake_case , snake_case , snake_case , snake_case , out_image=os.path.join(snake_case , "pr_exact.png" ) , title="Precision-Recall curve for Exact Match score" , )
snake_case_ = make_precision_recall_eval(
snake_case , snake_case , snake_case , snake_case , out_image=os.path.join(snake_case , "pr_f1.png" ) , title="Precision-Recall curve for F1 score" , )
snake_case_ = {k: float(snake_case ) for k, v in qid_to_has_ans.items()}
snake_case_ = make_precision_recall_eval(
snake_case , snake_case , snake_case , snake_case , out_image=os.path.join(snake_case , "pr_oracle.png" ) , title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)" , )
merge_eval(snake_case , snake_case , "pr_exact" )
merge_eval(snake_case , snake_case , "pr_f1" )
merge_eval(snake_case , snake_case , "pr_oracle" )
def UpperCamelCase_( snake_case : int , snake_case : Optional[int] , snake_case : str , snake_case : Any ):
'''simple docstring'''
if not qid_list:
return
snake_case_ = [na_probs[k] for k in qid_list]
snake_case_ = np.ones_like(snake_case ) / float(len(snake_case ) )
plt.hist(snake_case , weights=snake_case , bins=2_0 , range=(0.0, 1.0) )
plt.xlabel("Model probability of no-answer" )
plt.ylabel("Proportion of dataset" )
plt.title(f'Histogram of no-answer probability: {name}' )
plt.savefig(os.path.join(snake_case , f'na_prob_hist_{name}.png' ) )
plt.clf()
def UpperCamelCase_( snake_case : Dict , snake_case : int , snake_case : Optional[int] , snake_case : Optional[int] ):
'''simple docstring'''
snake_case_ = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
snake_case_ = num_no_ans
snake_case_ = cur_score
snake_case_ = 0.0
snake_case_ = sorted(snake_case , key=lambda snake_case : na_probs[k] )
for i, qid in enumerate(snake_case ):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
snake_case_ = scores[qid]
else:
if preds[qid]:
snake_case_ = -1
else:
snake_case_ = 0
cur_score += diff
if cur_score > best_score:
snake_case_ = cur_score
snake_case_ = na_probs[qid]
return 100.0 * best_score / len(snake_case ), best_thresh
def UpperCamelCase_( snake_case : Dict , snake_case : List[Any] , snake_case : int , snake_case : str , snake_case : Dict , snake_case : Dict ):
'''simple docstring'''
snake_case_ , snake_case_ = find_best_thresh(snake_case , snake_case , snake_case , snake_case )
snake_case_ , snake_case_ = find_best_thresh(snake_case , snake_case , snake_case , snake_case )
snake_case_ = best_exact
snake_case_ = exact_thresh
snake_case_ = best_fa
snake_case_ = fa_thresh
def UpperCamelCase_( ):
'''simple docstring'''
with open(OPTS.data_file ) as f:
snake_case_ = json.load(snake_case )
snake_case_ = dataset_json["data"]
with open(OPTS.pred_file ) as f:
snake_case_ = json.load(snake_case )
if OPTS.na_prob_file:
with open(OPTS.na_prob_file ) as f:
snake_case_ = json.load(snake_case )
else:
snake_case_ = {k: 0.0 for k in preds}
snake_case_ = make_qid_to_has_ans(snake_case ) # maps qid to True/False
snake_case_ = [k for k, v in qid_to_has_ans.items() if v]
snake_case_ = [k for k, v in qid_to_has_ans.items() if not v]
snake_case_ , snake_case_ = get_raw_scores(snake_case , snake_case )
snake_case_ = apply_no_ans_threshold(snake_case , snake_case , snake_case , OPTS.na_prob_thresh )
snake_case_ = apply_no_ans_threshold(snake_case , snake_case , snake_case , OPTS.na_prob_thresh )
snake_case_ = make_eval_dict(snake_case , snake_case )
if has_ans_qids:
snake_case_ = make_eval_dict(snake_case , snake_case , qid_list=snake_case )
merge_eval(snake_case , snake_case , "HasAns" )
if no_ans_qids:
snake_case_ = make_eval_dict(snake_case , snake_case , qid_list=snake_case )
merge_eval(snake_case , snake_case , "NoAns" )
if OPTS.na_prob_file:
find_all_best_thresh(snake_case , snake_case , snake_case , snake_case , snake_case , snake_case )
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(snake_case , snake_case , snake_case , snake_case , snake_case , OPTS.out_image_dir )
histogram_na_prob(snake_case , snake_case , OPTS.out_image_dir , "hasAns" )
histogram_na_prob(snake_case , snake_case , OPTS.out_image_dir , "noAns" )
if OPTS.out_file:
with open(OPTS.out_file , "w" ) as f:
json.dump(snake_case , snake_case )
else:
print(json.dumps(snake_case , indent=2 ) )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : Any = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
main()
| 85 |
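# A sketch of invoking the evaluation script above (the script name and file
# paths are placeholders; only the flags come from the argparse setup above):
#
#   python evaluate_v2.py data.json pred.json \
#       --na-prob-file na_prob.json --out-file eval.json
#
# The output JSON contains "exact", "f1", and "total", plus "HasAns"/"NoAns"
# subsets and best-threshold entries when --na-prob-file is supplied.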
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def UpperCamelCase_( snake_case : Any ):
'''simple docstring'''
    if (
        (cp >= 0X4E00 and cp <= 0X9FFF)  # CJK Unified Ideographs
        or (cp >= 0X3400 and cp <= 0X4DBF)  # CJK Unified Ideographs Extension A
        or (cp >= 0X2_0000 and cp <= 0X2_A6DF)  # CJK Unified Ideographs Extension B
        or (cp >= 0X2_A700 and cp <= 0X2_B73F)  # CJK Unified Ideographs Extension C
        or (cp >= 0X2_B740 and cp <= 0X2_B81F)  # CJK Unified Ideographs Extension D
        or (cp >= 0X2_B820 and cp <= 0X2_CEAF)  # CJK Unified Ideographs Extension E
        or (cp >= 0XF900 and cp <= 0XFAFF)  # CJK Compatibility Ideographs
        or (cp >= 0X2_F800 and cp <= 0X2_FA1F)  # CJK Compatibility Ideographs Supplement
    ):
        return True
return False
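# A quick illustration of the codepoint check above: ord("中") == 0x4E2D lies in
# the CJK Unified Ideographs block, so it returns True, while ord("a") == 0x61
# falls outside every listed range and returns False.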
def UpperCamelCase_( snake_case : str ):
'''simple docstring'''
for char in word:
snake_case_ = ord(snake_case )
if not _is_chinese_char(snake_case ):
return 0
return 1
def UpperCamelCase_( snake_case : List[str] ):
'''simple docstring'''
snake_case_ = set()
for token in tokens:
snake_case_ = len(snake_case ) > 1 and is_chinese(snake_case )
if chinese_word:
word_set.add(snake_case )
snake_case_ = list(snake_case )
return word_list
def UpperCamelCase_( snake_case : List[str] , snake_case : set() ):
'''simple docstring'''
if not chinese_word_set:
return bert_tokens
snake_case_ = max([len(snake_case ) for w in chinese_word_set] )
snake_case_ = bert_tokens
snake_case_ , snake_case_ = 0, len(snake_case )
while start < end:
snake_case_ = True
if is_chinese(bert_word[start] ):
snake_case_ = min(end - start , snake_case )
for i in range(snake_case , 1 , -1 ):
snake_case_ = "".join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
snake_case_ = "##" + bert_word[j]
snake_case_ = start + i
snake_case_ = False
break
if single_word:
start += 1
return bert_word
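# A sketch of the "##" marking above, with hypothetical inputs: given BERT
# tokens ["北", "京", "欢", "迎", "你"] and the LTP word set {"北京", "欢迎"},
# the longest-match scan rewrites the list to ["北", "##京", "欢", "##迎", "你"],
# so whole-word masking can treat a word's continuation pieces as one unit.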
def UpperCamelCase_( snake_case : List[str] , snake_case : LTP , snake_case : BertTokenizer ):
'''simple docstring'''
snake_case_ = []
for i in range(0 , len(snake_case ) , 1_0_0 ):
snake_case_ = ltp_tokenizer.pipeline(lines[i : i + 1_0_0] , tasks=["cws"] ).cws
snake_case_ = [get_chinese_word(snake_case ) for r in res]
ltp_res.extend(snake_case )
assert len(snake_case ) == len(snake_case )
snake_case_ = []
for i in range(0 , len(snake_case ) , 1_0_0 ):
snake_case_ = bert_tokenizer(lines[i : i + 1_0_0] , add_special_tokens=snake_case , truncation=snake_case , max_length=5_1_2 )
bert_res.extend(res["input_ids"] )
assert len(snake_case ) == len(snake_case )
snake_case_ = []
for input_ids, chinese_word in zip(snake_case , snake_case ):
snake_case_ = []
for id in input_ids:
snake_case_ = bert_tokenizer._convert_id_to_token(snake_case )
input_tokens.append(snake_case )
snake_case_ = add_sub_symbol(snake_case , snake_case )
snake_case_ = []
# We only save pos of chinese subwords start with ##, which mean is part of a whole word.
for i, token in enumerate(snake_case ):
if token[:2] == "##":
snake_case_ = token[2:]
# save chinese tokens' pos
if len(snake_case ) == 1 and _is_chinese_char(ord(snake_case ) ):
ref_id.append(snake_case )
ref_ids.append(snake_case )
assert len(snake_case ) == len(snake_case )
return ref_ids
def UpperCamelCase_( snake_case : Any ):
'''simple docstring'''
with open(args.file_name , "r" , encoding="utf-8" ) as f:
snake_case_ = f.readlines()
snake_case_ = [line.strip() for line in data if len(snake_case ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
snake_case_ = LTP(args.ltp ) # faster in GPU device
snake_case_ = BertTokenizer.from_pretrained(args.bert )
snake_case_ = prepare_ref(snake_case , snake_case , snake_case )
with open(args.save_path , "w" , encoding="utf-8" ) as f:
snake_case_ = [json.dumps(snake_case ) + "\n" for ref in ref_ids]
f.writelines(snake_case )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : List[str] = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"--file_name",
required=False,
type=str,
default="./resources/chinese-demo.txt",
help="file need process, same as training data in lm",
)
parser.add_argument(
"--ltp",
required=False,
type=str,
default="./resources/ltp",
help="resources for LTP tokenizer, usually a path",
)
parser.add_argument(
"--bert",
required=False,
type=str,
default="./resources/robert",
help="resources for Bert tokenizer",
)
parser.add_argument(
"--save_path",
required=False,
type=str,
default="./resources/ref.txt",
help="path to save res",
)
_SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args()
main(args)
| 85 | 1 |
'''simple docstring'''
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
__a: Tuple = logging.get_logger(__name__)
__a: List[Any] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
__a: Any = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
__a: List[Any] = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
__a: Dict = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
__a: List[Any] = {
'facebook/dpr-ctx_encoder-single-nq-base': 5_12,
'facebook/dpr-ctx_encoder-multiset-base': 5_12,
}
__a: Optional[Any] = {
'facebook/dpr-question_encoder-single-nq-base': 5_12,
'facebook/dpr-question_encoder-multiset-base': 5_12,
}
__a: Optional[int] = {
'facebook/dpr-reader-single-nq-base': 5_12,
'facebook/dpr-reader-multiset-base': 5_12,
}
__a: str = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
__a: Union[str, Any] = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
__a: List[Any] = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class UpperCAmelCase ( _lowerCAmelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class UpperCAmelCase ( _lowerCAmelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
__a: Any = collections.namedtuple(
"""DPRSpanPrediction""", ["""span_score""", """relevance_score""", """doc_id""", """start_index""", """end_index""", """text"""]
)
__a: str = collections.namedtuple("""DPRReaderOutput""", ["""start_logits""", """end_logits""", """relevance_logits"""])
__a: Optional[Any] = r'\n    Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n    It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n    using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n    with the format:\n\n    ```\n    [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n    ```\n\n    Args:\n        questions (`str` or `List[str]`):\n            The questions to be encoded. You can specify one question for many passages. In this case, the question\n            will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n            `titles` or `texts`.\n        titles (`str` or `List[str]`):\n            The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n        texts (`str` or `List[str]`):\n            The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n            Activates and controls padding. Accepts the following values:\n\n            - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n              if provided).\n            - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n              acceptable input length for the model if that argument is not provided.\n            - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n              lengths).\n        truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n            Activates and controls truncation. Accepts the following values:\n\n            - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n              the maximum acceptable input length for the model if that argument is not provided. This will truncate\n              token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n              of pairs) is provided.\n            - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n              acceptable input length for the model if that argument is not provided. This will only truncate the first\n              sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n            - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n              acceptable input length for the model if that argument is not provided. This will only truncate the\n              second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n            - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n              greater than the model maximum admissible input size).\n        max_length (`int`, *optional*):\n            Controls the maximum length to use by one of the truncation/padding parameters.\n\n            If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n            is required by one of the truncation/padding parameters. If the model has no specific maximum input\n            length (like XLNet) truncation/padding to a maximum length will be deactivated.\n        return_tensors (`str` or [`~utils.TensorType`], *optional*):\n            If set, will return tensors instead of list of python integers. Acceptable values are:\n\n            - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n            - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n            - `\'np\'`: Return Numpy `np.ndarray` objects.\n        return_attention_mask (`bool`, *optional*):\n            Whether or not to return the attention mask. If not set, will return the attention mask according to the\n            specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n            [What are attention masks?](../glossary#attention-mask)\n\n    Returns:\n        `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n        - `input_ids`: List of token ids to be fed to a model.\n        - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n    '
@add_start_docstrings(_lowerCAmelCase )
class UpperCAmelCase :
'''simple docstring'''
def __call__( self , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = False , __lowerCAmelCase = False , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , **__lowerCAmelCase , ) -> BatchEncoding:
if titles is None and texts is None:
return super().__call__(
SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
elif titles is None or texts is None:
lowercase__ : str = titles if texts is None else texts
return super().__call__(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
lowercase__ : int = titles if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else [titles]
lowercase__ : List[Any] = texts if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else [texts]
lowercase__ : str = len(SCREAMING_SNAKE_CASE_ )
lowercase__ : Tuple = questions if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else [questions] * n_passages
if len(SCREAMING_SNAKE_CASE_ ) != len(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
F"""There should be as many titles than texts but got {len(SCREAMING_SNAKE_CASE_ )} titles and {len(SCREAMING_SNAKE_CASE_ )} texts.""" )
lowercase__ : Dict = super().__call__(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ )["""input_ids"""]
lowercase__ : str = super().__call__(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ )["""input_ids"""]
lowercase__ : Optional[Any] = {
"""input_ids""": [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
]
}
if return_attention_mask is not False:
lowercase__ : Union[str, Any] = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
lowercase__ : int = attention_mask
return self.pad(SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ )
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 16 , __lowerCAmelCase = 64 , __lowerCAmelCase = 4 , ) -> List[DPRSpanPrediction]:
lowercase__ : Tuple = reader_input["""input_ids"""]
lowercase__ : List[Any] = reader_output[:3]
lowercase__ : Union[str, Any] = len(SCREAMING_SNAKE_CASE_ )
lowercase__ : List[Any] = sorted(range(SCREAMING_SNAKE_CASE_ ) , reverse=SCREAMING_SNAKE_CASE_ , key=relevance_logits.__getitem__ )
lowercase__ : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
lowercase__ : Tuple = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
lowercase__ : Union[str, Any] = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
lowercase__ : List[Any] = sequence_ids.index(self.pad_token_id )
else:
lowercase__ : Optional[int] = len(SCREAMING_SNAKE_CASE_ )
lowercase__ : Tuple = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=SCREAMING_SNAKE_CASE_ , top_spans=SCREAMING_SNAKE_CASE_ , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=SCREAMING_SNAKE_CASE_ , start_index=SCREAMING_SNAKE_CASE_ , end_index=SCREAMING_SNAKE_CASE_ , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(SCREAMING_SNAKE_CASE_ ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ) -> List[DPRSpanPrediction]:
lowercase__ : Optional[Any] = []
for start_index, start_score in enumerate(SCREAMING_SNAKE_CASE_ ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
lowercase__ : Tuple = sorted(SCREAMING_SNAKE_CASE_ , key=lambda __lowerCAmelCase : x[1] , reverse=SCREAMING_SNAKE_CASE_ )
lowercase__ : Optional[Any] = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(F"""Wrong span indices: [{start_index}:{end_index}]""" )
lowercase__ : int = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(F"""Span is too long: {length} > {max_answer_length}""" )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(SCREAMING_SNAKE_CASE_ ) == top_spans:
break
return chosen_span_intervals
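# A note on the span search above (illustration only): candidate spans are
# scored as start_logit + end_logit, sorted in descending order, and kept
# greedily unless they overlap an already-chosen span, stopping once
# `top_spans` spans have been collected.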
@add_end_docstrings(_lowerCAmelCase )
class UpperCAmelCase ( _lowerCAmelCase , _lowerCAmelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE = READER_PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE = READER_PRETRAINED_INIT_CONFIGURATION
SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask"]
| 366 | '''simple docstring'''
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__a: List[str] = logging.get_logger(__name__)
__a: int = """▁"""
__a: Optional[int] = {"""vocab_file""": """prophetnet.tokenizer"""}
__a: Optional[int] = {
"""vocab_file""": {
"""microsoft/xprophetnet-large-wiki100-cased""": (
"""https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"""
),
}
}
__a: List[str] = {
"""microsoft/xprophetnet-large-wiki100-cased""": {"""do_lower_case""": False},
}
__a: Tuple = {
"""microsoft/xprophetnet-large-wiki100-cased""": 5_12,
}
def __UpperCamelCase ( UpperCAmelCase ):
lowercase__ : Dict = collections.OrderedDict()
with open(UpperCAmelCase , '''r''' , encoding='''utf-8''' ) as reader:
lowercase__ : List[Any] = reader.readlines()
for index, token in enumerate(UpperCAmelCase ):
lowercase__ : List[Any] = token.rstrip('''\n''' )
lowercase__ : Tuple = index
return vocab
class UpperCAmelCase ( a__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask"]
def __init__( self , __lowerCAmelCase , __lowerCAmelCase="[SEP]" , __lowerCAmelCase="[SEP]" , __lowerCAmelCase="[SEP]" , __lowerCAmelCase="[UNK]" , __lowerCAmelCase="[PAD]" , __lowerCAmelCase="[CLS]" , __lowerCAmelCase="[MASK]" , __lowerCAmelCase = None , **__lowerCAmelCase , ) -> None:
lowercase__ : Dict = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , cls_token=__lowerCAmelCase , mask_token=__lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCAmelCase , )
try:
import sentencepiece as spm
except ImportError:
            logger.warning(
                '''You need to install SentencePiece to use XLMProphetNetTokenizer: https://github.com/google/sentencepiece'''
                ''' pip install sentencepiece''' )
raise
lowercase__ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__lowerCAmelCase ) )
lowercase__ : Optional[int] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# put special tokens and [unused] tokens into the vocab
lowercase__ : str = {'''[PAD]''': 0, '''[CLS]''': 1, '''[SEP]''': 2, '''[UNK]''': 3, '''[MASK]''': 4}
for i in range(10 ):
lowercase__ : Tuple = F"""[unused{i}]"""
lowercase__ : List[Any] = 5 + i
# The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
lowercase__ : Optional[Any] = 12
lowercase__ : Tuple = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
for k in self.fairseq_tokens_to_ids.keys():
self.unique_no_split_tokens.append(__lowerCAmelCase )
def __getstate__( self ) -> Union[str, Any]:
lowercase__ : Dict = self.__dict__.copy()
lowercase__ : Optional[Any] = None
return state
def __setstate__( self , __lowerCAmelCase ) -> Dict:
lowercase__ : Any = d
try:
import sentencepiece as spm
except ImportError:
            logger.warning(
                '''You need to install SentencePiece to use XLMProphetNetTokenizer: https://github.com/google/sentencepiece'''
                ''' pip install sentencepiece''' )
raise
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowercase__ : List[Any] = {}
lowercase__ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCAmelCase , token_ids_a=__lowerCAmelCase , already_has_special_tokens=__lowerCAmelCase )
if token_ids_a is None:
return ([0] * len(__lowerCAmelCase )) + [1]
return ([0] * len(__lowerCAmelCase )) + [1] + ([0] * len(__lowerCAmelCase )) + [1]
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase = None ) -> List[int]:
lowercase__ : List[str] = [self.sep_token_id]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0]
return len(token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def _lowerCAmelCase( self ) -> List[Any]:
return len(self.sp_model ) + self.fairseq_offset
def _lowerCAmelCase( self ) -> List[str]:
lowercase__ : str = {self.convert_ids_to_tokens(__lowerCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _lowerCAmelCase( self , __lowerCAmelCase ) -> str:
return self.sp_model.encode(__lowerCAmelCase , out_type=__lowerCAmelCase )
def _lowerCAmelCase( self , __lowerCAmelCase ) -> Optional[Any]:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowercase__ : Tuple = self.sp_model.PieceToId(__lowerCAmelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _lowerCAmelCase( self , __lowerCAmelCase ) -> str:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _lowerCAmelCase( self , __lowerCAmelCase ) -> Union[str, Any]:
lowercase__ : Optional[Any] = ''''''.join(__lowerCAmelCase ).replace(__lowerCAmelCase , ''' ''' ).strip()
return out_string
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase = None ) -> Tuple[str]:
if not os.path.isdir(__lowerCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowercase__ : str = os.path.join(
__lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __lowerCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__lowerCAmelCase , '''wb''' ) as fi:
lowercase__ : List[Any] = self.sp_model.serialized_model_proto()
fi.write(__lowerCAmelCase )
return (out_vocab_file,)
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase = None ) -> List[int]:
if token_ids_a is None:
return token_ids_a + [self.sep_token_id]
lowercase__ : str = [self.sep_token_id]
return token_ids_a + sep + token_ids_a + sep
| 214 | 0 |
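# A minimal round-trip sketch for the tokenizer defined above, assuming the
# public XLMProphetNetTokenizer class and the checkpoint named in the maps
# above are available:
from transformers import XLMProphetNetTokenizer

tokenizer = XLMProphetNetTokenizer.from_pretrained(
    "microsoft/xprophetnet-large-wiki100-cased"
)
ids = tokenizer("Hello world")["input_ids"]  # ends with the [SEP] id
print(tokenizer.convert_ids_to_tokens(ids))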
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def SCREAMING_SNAKE_CASE_ ( __A : Optional[Any] , __A : Union[str, Any] , __A : int , __A : Optional[int] ) -> Tuple:
"""simple docstring"""
a_ : Optional[Any] = s.rsplit(__A , __A )
return new.join(__A )
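# An illustration of the right-to-left rename performed above:
# rreplace("decoder.blocks.w", ".w", ".weight", 1) -> "decoder.blocks.weight".
# Because `rsplit` works from the right, earlier ".w" occurrences are untouched.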
def SCREAMING_SNAKE_CASE_ ( __A : List[Any] ) -> List[str]:
"""simple docstring"""
return sum(param.float().sum() if 'encoder.embeddings' not in key else 0 for key, param in state_dict.items() )
def SCREAMING_SNAKE_CASE_ ( __A : str ) -> int:
"""simple docstring"""
a_ : Tuple = {}
a_ : int = ['group_1', 'group_2', 'group_3', 'group_4']
for key, value in state_dict.items():
for group_key in group_keys:
if group_key in key:
a_ : Optional[Any] = key.replace(F"""{group_key}.""" , F"""{group_key}.group.""" )
if "res_path" in key:
a_ : Union[str, Any] = key.replace('res_path.' , 'res_path.path.' )
if key.endswith('.w' ):
a_ : Dict = rreplace(__A , '.w' , '.weight' , 1 )
if key.endswith('.b' ):
a_ : Optional[Any] = rreplace(__A , '.b' , '.bias' , 1 )
a_ : Dict = value.float()
return upgrade
@torch.no_grad()
def SCREAMING_SNAKE_CASE_ ( __A : List[str] , __A : Optional[int] , __A : Optional[int]=None , __A : List[Any]=True ) -> List[Any]:
"""simple docstring"""
from dall_e import Encoder
a_ : List[Any] = Encoder()
if os.path.exists(__A ):
a_ : Tuple = torch.load(__A )
else:
a_ : List[Any] = torch.hub.load_state_dict_from_url(__A )
if isinstance(__A , __A ):
a_ : Optional[int] = ckpt.state_dict()
encoder.load_state_dict(__A )
if config_path is not None:
a_ : Optional[int] = FlavaImageCodebookConfig.from_pretrained(__A )
else:
a_ : Any = FlavaImageCodebookConfig()
a_ : int = FlavaImageCodebook(__A ).eval()
a_ : List[str] = encoder.state_dict()
a_ : Any = upgrade_state_dict(__A )
hf_model.load_state_dict(__A )
a_ : int = hf_model.state_dict()
a_ : Optional[Any] = count_parameters(__A )
a_ : Tuple = count_parameters(__A )
assert torch.allclose(__A , __A , atol=1e-3 )
if save_checkpoint:
hf_model.save_pretrained(__A )
else:
return hf_state_dict
if __name__ == "__main__":
UpperCAmelCase_ : Any = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
UpperCAmelCase_ : Tuple = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 32 |
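# A sketch of invoking the conversion above (the script name and paths are
# placeholders; the flags come from the argparse setup):
#
#   python convert_dalle_to_flava_codebook.py \
#       --checkpoint_path ./encoder.pkl \
#       --pytorch_dump_folder_path ./flava-image-codebook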
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class SCREAMING_SNAKE_CASE__ :
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Tuple=1_3 , SCREAMING_SNAKE_CASE__ : str=7 , SCREAMING_SNAKE_CASE__ : List[str]=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=9_9 , SCREAMING_SNAKE_CASE__ : Optional[Any]=3_2 , SCREAMING_SNAKE_CASE__ : List[str]=5 , SCREAMING_SNAKE_CASE__ : List[Any]=4 , SCREAMING_SNAKE_CASE__ : Tuple=3_7 , SCREAMING_SNAKE_CASE__ : Any="gelu" , SCREAMING_SNAKE_CASE__ : Dict=0.1 , SCREAMING_SNAKE_CASE__ : Dict=0.1 , SCREAMING_SNAKE_CASE__ : int=5_1_2 , SCREAMING_SNAKE_CASE__ : int=1_6 , SCREAMING_SNAKE_CASE__ : Optional[int]=2 , SCREAMING_SNAKE_CASE__ : Any=0.02 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=3 , SCREAMING_SNAKE_CASE__ : Optional[Any]=4 , SCREAMING_SNAKE_CASE__ : Optional[int]=None , ) -> Any:
a_ : Tuple = parent
a_ : int = batch_size
a_ : Tuple = seq_length
a_ : List[Any] = is_training
a_ : List[str] = use_token_type_ids
a_ : Dict = use_labels
a_ : Any = vocab_size
a_ : List[str] = hidden_size
a_ : Tuple = num_hidden_layers
a_ : List[Any] = num_attention_heads
a_ : Dict = intermediate_size
a_ : Any = hidden_act
a_ : List[str] = hidden_dropout_prob
a_ : Tuple = attention_probs_dropout_prob
a_ : Optional[Any] = max_position_embeddings
a_ : List[Any] = type_vocab_size
a_ : int = type_sequence_label_size
a_ : List[Any] = initializer_range
a_ : List[str] = num_labels
a_ : Union[str, Any] = num_choices
a_ : str = scope
a_ : Tuple = self.vocab_size - 1
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
a_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a_ : Any = None
if self.use_token_type_ids:
a_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
a_ : List[Any] = None
a_ : Union[str, Any] = None
a_ : List[Any] = None
if self.use_labels:
a_ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
a_ : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
a_ : Union[str, Any] = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
a_ : List[str] = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def SCREAMING_SNAKE_CASE ( self : List[str] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] , *SCREAMING_SNAKE_CASE__ : Tuple ) -> Union[str, Any]:
a_ : Dict = OpenAIGPTModel(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
a_ : str = model(SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , head_mask=SCREAMING_SNAKE_CASE__ )
a_ : Dict = model(SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ )
a_ : Dict = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any] , *SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Any:
a_ : str = OpenAIGPTLMHeadModel(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
a_ : Optional[int] = model(SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[Any] , *SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Dict:
a_ : int = OpenAIGPTDoubleHeadsModel(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
a_ : str = model(SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE ( self : Tuple , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , *SCREAMING_SNAKE_CASE__ : str ) -> List[str]:
a_ : Any = self.num_labels
a_ : Dict = OpenAIGPTForSequenceClassification(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
a_ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a_ : Any = model(SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple:
a_ : Optional[Any] = self.prepare_config_and_inputs()
(
(
a_
) , (
a_
) , (
a_
) , (
a_
) , (
a_
) , (
a_
) , (
a_
) ,
) : Optional[Any] = config_and_inputs
a_ : Optional[int] = {
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'head_mask': head_mask,
}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ):
snake_case__ : Tuple = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
snake_case__ : List[str] = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
snake_case__ : Dict = (
{
'''feature-extraction''': OpenAIGPTModel,
'''text-classification''': OpenAIGPTForSequenceClassification,
'''text-generation''': OpenAIGPTLMHeadModel,
'''zero-shot''': OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def SCREAMING_SNAKE_CASE ( self : List[str] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[str] ) -> Dict:
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def SCREAMING_SNAKE_CASE ( self : int , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Any=False ) -> List[str]:
a_ : str = super()._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , return_labels=SCREAMING_SNAKE_CASE__ )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
a_ : Optional[Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=SCREAMING_SNAKE_CASE__ , )
a_ : str = inputs_dict['labels']
a_ : Optional[int] = inputs_dict['labels']
a_ : Optional[int] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=SCREAMING_SNAKE_CASE__ , )
a_ : Union[str, Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=SCREAMING_SNAKE_CASE__ )
return inputs_dict
def SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
a_ : str = OpenAIGPTModelTester(self )
a_ : int = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , n_embd=3_7 )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple:
a_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple:
a_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[Any]:
a_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]:
a_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*SCREAMING_SNAKE_CASE__ )
@slow
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ : str = OpenAIGPTModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
a_ : Dict = OpenAIGPTLMHeadModel.from_pretrained('openai-gpt' )
model.to(SCREAMING_SNAKE_CASE__ )
a_ : List[Any] = torch.tensor([[4_8_1, 4_7_3_5, 5_4_4]] , dtype=torch.long , device=SCREAMING_SNAKE_CASE__ ) # the president is
a_ : Tuple = [
4_8_1,
4_7_3_5,
5_4_4,
2_4_6,
9_6_3,
8_7_0,
7_6_2,
2_3_9,
2_4_4,
4_0_4_7_7,
2_4_4,
2_4_9,
7_1_9,
8_8_1,
4_8_7,
5_4_4,
2_4_0,
2_4_4,
6_0_3,
4_8_1,
] # the president is a very good man. " \n " i\'m sure he is, " said the
a_ : Dict = model.generate(SCREAMING_SNAKE_CASE__ , do_sample=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(output_ids[0].tolist() , SCREAMING_SNAKE_CASE__ )
| 32 | 1 |
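# A minimal generation sketch mirroring the slow test above, assuming the
# standard transformers API for the "openai-gpt" checkpoint:
from transformers import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer

tokenizer = OpenAIGPTTokenizer.from_pretrained("openai-gpt")
model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
inputs = tokenizer("the president is", return_tensors="pt")
output_ids = model.generate(inputs["input_ids"], do_sample=False)
print(tokenizer.decode(output_ids[0]))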
"""simple docstring"""
def stooge_sort(arr):
    stooge(arr, 0, len(arr) - 1)
    return arr
def stooge(arr, i, h):
    if i >= h:
        return
    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]
    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (h - i + 1) // 3
        # Recursively sort the first 2/3 of the elements
        stooge(arr, i, h - t)
        # Recursively sort the last 2/3 of the elements
        stooge(arr, i + t, h)
        # Recursively sort the first 2/3 of the elements again
        stooge(arr, i, h - t)
if __name__ == "__main__":
lowerCamelCase__ = input("""Enter numbers separated by a comma:\n""").strip()
lowerCamelCase__ = [int(item) for item in user_input.split(""",""")]
print(stooge_sort(unsorted)) | 365 |
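# Note (illustrative): stooge sort runs in O(n^(log 3 / log 1.5)) ~ O(n^2.71)
# time, so it is a teaching example rather than a practical sort; e.g.
# stooge_sort([2, 4, 5, 3, 1]) returns [1, 2, 3, 4, 5].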
"""simple docstring"""
def gnome_sort(lst):
    if len(lst) <= 1:
        return lst
    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst
if __name__ == "__main__":
lowerCamelCase__ = input("""Enter numbers separated by a comma:\n""").strip()
lowerCamelCase__ = [int(item) for item in user_input.split(""",""")]
print(gnome_sort(unsorted)) | 182 | 0 |
from manim import *
class _snake_case ( _snake_case ):
def SCREAMING_SNAKE_CASE__ ( self ):
a :Dict = Rectangle(height=0.5 , width=0.5 )
a :Union[str, Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
a :List[Any] = [mem.copy() for i in range(6 )]
a :int = [mem.copy() for i in range(6 )]
a :Optional[Any] = VGroup(*_lowerCamelCase ).arrange(_lowerCamelCase , buff=0 )
a :List[str] = VGroup(*_lowerCamelCase ).arrange(_lowerCamelCase , buff=0 )
a :Union[str, Any] = VGroup(_lowerCamelCase , _lowerCamelCase ).arrange(_lowerCamelCase , buff=0 )
a :Any = Text('''CPU''' , font_size=24 )
a :Dict = Group(_lowerCamelCase , _lowerCamelCase ).arrange(_lowerCamelCase , buff=0.5 , aligned_edge=_lowerCamelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_lowerCamelCase )
a :Optional[Any] = [mem.copy() for i in range(4 )]
a :Dict = VGroup(*_lowerCamelCase ).arrange(_lowerCamelCase , buff=0 )
a :Union[str, Any] = Text('''GPU''' , font_size=24 )
a :List[str] = Group(_lowerCamelCase , _lowerCamelCase ).arrange(_lowerCamelCase , buff=0.5 , aligned_edge=_lowerCamelCase )
gpu.move_to([-1, -1, 0] )
self.add(_lowerCamelCase )
a :List[str] = [mem.copy() for i in range(6 )]
a :Union[str, Any] = VGroup(*_lowerCamelCase ).arrange(_lowerCamelCase , buff=0 )
a :Optional[int] = Text('''Model''' , font_size=24 )
a :int = Group(_lowerCamelCase , _lowerCamelCase ).arrange(_lowerCamelCase , buff=0.5 , aligned_edge=_lowerCamelCase )
model.move_to([3, -1.0, 0] )
self.add(_lowerCamelCase )
a :Optional[Any] = []
for i, rect in enumerate(_lowerCamelCase ):
rect.set_stroke(_lowerCamelCase )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
a :int = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(_lowerCamelCase , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=_lowerCamelCase )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=_lowerCamelCase , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=_lowerCamelCase , buff=0.0 )
self.add(_lowerCamelCase )
cpu_targs.append(_lowerCamelCase )
a :Optional[Any] = [mem.copy() for i in range(6 )]
a :Tuple = VGroup(*_lowerCamelCase ).arrange(_lowerCamelCase , buff=0 )
a :Any = Text('''Loaded Checkpoint''' , font_size=24 )
a :Dict = Group(_lowerCamelCase , _lowerCamelCase ).arrange(_lowerCamelCase , aligned_edge=_lowerCamelCase , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
a :Optional[int] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
a :Optional[Any] = MarkupText(
F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(_lowerCamelCase , _lowerCamelCase )
a :Union[str, Any] = MarkupText(
F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
blue_text.next_to(_lowerCamelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
a :Dict = MarkupText(
F'''Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(_lowerCamelCase ) , Write(_lowerCamelCase ) )
self.play(Write(_lowerCamelCase , run_time=1 ) , Create(_lowerCamelCase , run_time=1 ) )
a :Optional[Any] = []
a :List[Any] = []
for i, rect in enumerate(_lowerCamelCase ):
a :Tuple = fill.copy().set_fill(_lowerCamelCase , opacity=0.7 )
target.move_to(_lowerCamelCase )
first_animations.append(GrowFromCenter(_lowerCamelCase , run_time=1 ) )
a :Optional[Any] = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(_lowerCamelCase , run_time=1.5 ) )
self.play(*_lowerCamelCase )
self.play(*_lowerCamelCase )
self.wait()
| 94 |
"""simple docstring"""
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
__UpperCamelCase : Optional[Any] = '''scheduler_config.json'''
class SCREAMING_SNAKE_CASE ( a_ ):
"""simple docstring"""
lowercase__ = 1
lowercase__ = 2
lowercase__ = 3
lowercase__ = 4
lowercase__ = 5
@dataclass
class SCREAMING_SNAKE_CASE ( a_ ):
"""simple docstring"""
lowercase__ = 42
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
lowercase__ = SCHEDULER_CONFIG_NAME
lowercase__ = ["dtype"]
lowercase__ = []
lowercase__ = True
@classmethod
def __lowerCAmelCase ( cls : List[Any] ,lowercase_ : Dict[str, Any] = None ,lowercase_ : Optional[str] = None ,lowercase_ : Optional[int]=False ,**lowercase_ : Any ,):
        lowerCAmelCase__ , lowerCAmelCase__ = cls.load_config(
pretrained_model_name_or_path=lowercase_ ,subfolder=lowercase_ ,return_unused_kwargs=lowercase_ ,**lowercase_ ,)
        lowerCAmelCase__ , lowerCAmelCase__ = cls.from_config(lowercase_ ,return_unused_kwargs=lowercase_ ,**lowercase_ )
if hasattr(lowercase_ ,'''create_state''' ) and getattr(lowercase_ ,'''has_state''' ,lowercase_ ):
lowerCAmelCase__ : List[Any] = scheduler.create_state()
if return_unused_kwargs:
return scheduler, state, unused_kwargs
return scheduler, state
def __lowerCAmelCase ( self : Tuple ,lowercase_ : Union[str, os.PathLike] ,lowercase_ : bool = False ,**lowercase_ : str ):
self.save_config(save_directory=lowercase_ ,push_to_hub=lowercase_ ,**lowercase_ )
@property
def __lowerCAmelCase ( self : List[str] ):
return self._get_compatibles()
@classmethod
def __lowerCAmelCase ( cls : List[Any] ):
lowerCAmelCase__ : Tuple = list(set([cls.__name__] + cls._compatibles ) )
lowerCAmelCase__ : Tuple = importlib.import_module(__name__.split('''.''' )[0] )
lowerCAmelCase__ : Union[str, Any] = [
getattr(lowercase_ ,lowercase_ ) for c in compatible_classes_str if hasattr(lowercase_ ,lowercase_ )
]
return compatible_classes
def __SCREAMING_SNAKE_CASE ( A_ , A_ ):
assert len(A_ ) >= x.ndim
return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(A_ ) - x.ndim) ) , A_ )
def __SCREAMING_SNAKE_CASE ( A_ , A_=0.999 , A_=jnp.floataa ):
    def alpha_bar(time_step ):
return math.cos((time_step + 0.008) / 1.008 * math.pi / 2 ) ** 2
lowerCAmelCase__ : Optional[Any] = []
for i in range(A_ ):
lowerCAmelCase__ : str = i / num_diffusion_timesteps
lowerCAmelCase__ : Union[str, Any] = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(A_ ) / alpha_bar(A_ ) , A_ ) )
return jnp.array(A_ , dtype=A_ )
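# --- Illustrative sketch (not part of the original source) ---
# A self-contained equivalent of the cosine ("squaredcos_cap_v2") beta
# schedule computed above, using the `math` and `jnp` imports already in this
# file; `cosine_betas` is a hypothetical name added here for reference only.
def cosine_betas(num_diffusion_timesteps: int, max_beta: float = 0.999) -> jnp.ndarray:
    def alpha_bar(time_step: float) -> float:
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        # beta_i = 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), capped at max_beta
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return jnp.array(betas)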
@flax.struct.dataclass
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
lowercase__ = 42
lowercase__ = 42
lowercase__ = 42
@classmethod
def __lowerCAmelCase ( cls : Union[str, Any] ,lowercase_ : List[Any] ):
lowerCAmelCase__ : Optional[int] = scheduler.config
if config.trained_betas is not None:
lowerCAmelCase__ : Any = jnp.asarray(config.trained_betas ,dtype=scheduler.dtype )
elif config.beta_schedule == "linear":
lowerCAmelCase__ : Union[str, Any] = jnp.linspace(config.beta_start ,config.beta_end ,config.num_train_timesteps ,dtype=scheduler.dtype )
elif config.beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
lowerCAmelCase__ : int = (
jnp.linspace(
config.beta_start**0.5 ,config.beta_end**0.5 ,config.num_train_timesteps ,dtype=scheduler.dtype )
** 2
)
elif config.beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
lowerCAmelCase__ : List[Any] = betas_for_alpha_bar(config.num_train_timesteps ,dtype=scheduler.dtype )
else:
raise NotImplementedError(
F'beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}' )
lowerCAmelCase__ : str = 1.0 - betas
lowerCAmelCase__ : Union[str, Any] = jnp.cumprod(lowercase_ ,axis=0 )
return cls(
alphas=lowercase_ ,betas=lowercase_ ,alphas_cumprod=lowercase_ ,)
def __SCREAMING_SNAKE_CASE ( A_ , A_ , A_ , A_ ):
lowerCAmelCase__ : Any = state.alphas_cumprod
lowerCAmelCase__ : Optional[Any] = alphas_cumprod[timesteps] ** 0.5
lowerCAmelCase__ : Tuple = sqrt_alpha_prod.flatten()
lowerCAmelCase__ : str = broadcast_to_shape_from_left(A_ , original_samples.shape )
lowerCAmelCase__ : Optional[Any] = (1 - alphas_cumprod[timesteps]) ** 0.5
lowerCAmelCase__ : Optional[Any] = sqrt_one_minus_alpha_prod.flatten()
lowerCAmelCase__ : Optional[int] = broadcast_to_shape_from_left(A_ , original_samples.shape )
return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def __SCREAMING_SNAKE_CASE ( A_ , A_ , A_ , A_ ):
    lowerCAmelCase__ , lowerCAmelCase__ = get_sqrt_alpha_prod(A_ , A_ , A_ , A_ )
lowerCAmelCase__ : str = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
def __SCREAMING_SNAKE_CASE ( A_ , A_ , A_ , A_ ):
    lowerCAmelCase__ , lowerCAmelCase__ = get_sqrt_alpha_prod(A_ , A_ , A_ , A_ )
lowerCAmelCase__ : Union[str, Any] = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
return velocity
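# --- Illustrative note (not part of the original source) ---
# Assuming the three module-level helpers above correspond to
# `get_sqrt_alpha_prod`, `add_noise`, and `get_velocity`, they implement the
# closed-form forward process with a_bar_t = alphas_cumprod[t]:
#
#   x_t = sqrt(a_bar_t) * x_0 + sqrt(1 - a_bar_t) * eps      (noisy sample)
#   v_t = sqrt(a_bar_t) * eps - sqrt(1 - a_bar_t) * x_0      (v-prediction target)
#
# with both square-root factors broadcast from the left to the sample shape.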
| 106 | 0 |
'''simple docstring'''
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings("""ignore""", category=UserWarning, module="""torch.optim.lr_scheduler""")
class UpperCamelCase_ :
"""simple docstring"""
def __init__( self : List[str] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : int , _lowerCamelCase : bool = True , _lowerCamelCase : bool = False ):
"""simple docstring"""
A_ : Optional[int] = scheduler
A_ : List[Any] = optimizers if isinstance(UpperCamelCase_ , (list, tuple) ) else [optimizers]
A_ : Union[str, Any] = split_batches
A_ : List[Any] = step_with_optimizer
A_ : List[str] = GradientState()
def _a ( self : Union[str, Any] , *_lowerCamelCase : Optional[int] , **_lowerCamelCase : Optional[Any] ):
"""simple docstring"""
if not self.step_with_optimizer:
# No link between scheduler and optimizer -> just step
self.scheduler.step(*UpperCamelCase_ , **UpperCamelCase_ )
return
# Otherwise, first make sure the optimizer was stepped.
if not self.gradient_state.sync_gradients:
if self.gradient_state.adjust_scheduler:
self.scheduler._step_count += 1
return
for opt in self.optimizers:
if opt.step_was_skipped:
return
if self.split_batches:
# Split batches -> the training dataloader batch size is not changed so one step per training step
self.scheduler.step(*UpperCamelCase_ , **UpperCamelCase_ )
else:
# Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
# num_processes steps per training step
A_ : Any = AcceleratorState().num_processes
for _ in range(UpperCamelCase_ ):
# Special case when using OneCycle and `drop_last` was not used
if hasattr(self.scheduler , '''total_steps''' ):
if self.scheduler._step_count <= self.scheduler.total_steps:
self.scheduler.step(*UpperCamelCase_ , **UpperCamelCase_ )
else:
self.scheduler.step(*UpperCamelCase_ , **UpperCamelCase_ )
def _a ( self : Any ):
"""simple docstring"""
return self.scheduler.get_last_lr()
def _a ( self : Tuple ):
"""simple docstring"""
return self.scheduler.state_dict()
def _a ( self : Union[str, Any] , _lowerCamelCase : List[str] ):
"""simple docstring"""
self.scheduler.load_state_dict(UpperCamelCase_ )
def _a ( self : Any ):
"""simple docstring"""
return self.scheduler.get_lr()
def _a ( self : Any , *_lowerCamelCase : List[str] , **_lowerCamelCase : Any ):
"""simple docstring"""
return self.scheduler.print_lr(*UpperCamelCase_ , **UpperCamelCase_ )
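# --- Illustrative usage (not part of the original source) ---
# The wrapper above keeps the LR scheduler in lock-step with gradient
# accumulation: it only advances when gradients were actually synchronized
# and no optimizer step was skipped (e.g. by a mixed-precision grad scaler).
# A minimal sketch of the surrounding Accelerate training loop:
#
#   for batch in dataloader:
#       with accelerator.accumulate(model):
#           loss = model(**batch).loss
#           accelerator.backward(loss)
#           optimizer.step()
#           scheduler.step()   # no-op on accumulation-only steps
#           optimizer.zero_grad()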
| 354 |
'''simple docstring'''
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    """
    Return indices of the two values in ``nums`` that sum to ``target``,
    using the two-pointer technique; ``nums`` must be sorted in ascending
    order.

    >>> two_pointer([2, 7, 11, 15], 9)
    [0, 1]
    >>> two_pointer([2, 7, 11, 15], 100)
    []
    """
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'{two_pointer([2, 7, 11, 15], 9) = }')
| 4 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _UpperCAmelCase ( ProcessorMixin ):
__a : str = ["""image_processor""", """tokenizer"""]
__a : Any = """BlipImageProcessor"""
__a : Any = """AutoTokenizer"""
def __init__( self , _A , _A ) -> List[Any]:
'''simple docstring'''
_UpperCAmelCase : Dict = False
super().__init__(_A , _A )
_UpperCAmelCase : Optional[int] = self.image_processor
def __call__( self , _A = None , _A = None , _A = True , _A = False , _A = None , _A = None , _A = 0 , _A = None , _A = None , _A = False , _A = False , _A = False , _A = False , _A = False , _A = True , _A = None , **_A , ) -> BatchEncoding:
'''simple docstring'''
if images is None and text is None:
raise ValueError("""You have to specify either images or text.""" )
# Get only text
if images is None:
_UpperCAmelCase : Optional[int] = self.tokenizer
_UpperCAmelCase : List[Any] = self.tokenizer(
text=_A , add_special_tokens=_A , padding=_A , truncation=_A , max_length=_A , stride=_A , pad_to_multiple_of=_A , return_attention_mask=_A , return_overflowing_tokens=_A , return_special_tokens_mask=_A , return_offsets_mapping=_A , return_token_type_ids=_A , return_length=_A , verbose=_A , return_tensors=_A , **_A , )
return text_encoding
# add pixel_values
_UpperCAmelCase : Optional[int] = self.image_processor(_A , return_tensors=_A )
if text is not None:
_UpperCAmelCase : str = self.tokenizer(
text=_A , add_special_tokens=_A , padding=_A , truncation=_A , max_length=_A , stride=_A , pad_to_multiple_of=_A , return_attention_mask=_A , return_overflowing_tokens=_A , return_special_tokens_mask=_A , return_offsets_mapping=_A , return_token_type_ids=_A , return_length=_A , verbose=_A , return_tensors=_A , **_A , )
else:
_UpperCAmelCase : Tuple = None
if text_encoding is not None:
encoding_image_processor.update(_A )
return encoding_image_processor
def __snake_case ( self , *_A , **_A ) -> str:
'''simple docstring'''
return self.tokenizer.batch_decode(*_A , **_A )
def __snake_case ( self , *_A , **_A ) -> Dict:
'''simple docstring'''
return self.tokenizer.decode(*_A , **_A )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def __snake_case ( self ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = self.tokenizer.model_input_names
_UpperCAmelCase : Optional[int] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
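# --- Illustrative usage (not part of the original source) ---
# Sketch of driving a BLIP-style processor like the one above; the
# checkpoint name is only an example:
#
#   processor = AutoProcessor.from_pretrained("Salesforce/blip2-opt-2.7b")
#   inputs = processor(images=image, text="a photo of", return_tensors="pt")
#   # -> pixel_values from the image processor merged with the tokenizer's
#   #    input_ids / attention_mask, as implemented by __call__ above.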
| 246 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class _UpperCAmelCase ( unittest.TestCase):
def __snake_case ( self ) -> str:
'''simple docstring'''
_UpperCAmelCase : Tuple = """| <pad> <unk> <s> </s> a b c d e f g h i j k""".split()
_UpperCAmelCase : List[str] = dict(zip(_A , range(len(_A ) ) ) )
_UpperCAmelCase : List[Any] = {
"""unk_token""": """<unk>""",
"""bos_token""": """<s>""",
"""eos_token""": """</s>""",
}
_UpperCAmelCase : Dict = {
"""feature_size""": 1,
"""padding_value""": 0.0,
"""sampling_rate""": 1_60_00,
"""return_attention_mask""": False,
"""do_normalize""": True,
}
_UpperCAmelCase : Optional[int] = tempfile.mkdtemp()
_UpperCAmelCase : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
_UpperCAmelCase : int = os.path.join(self.tmpdirname , _A )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_A ) + """\n""" )
with open(self.feature_extraction_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_A ) + """\n""" )
# load decoder from hub
_UpperCAmelCase : List[Any] = """hf-internal-testing/ngram-beam-search-decoder"""
def __snake_case ( self , **_A ) -> Dict:
'''simple docstring'''
_UpperCAmelCase : str = self.add_kwargs_tokens_map.copy()
kwargs.update(_A )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **_A )
def __snake_case ( self , **_A ) -> Tuple:
'''simple docstring'''
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **_A )
def __snake_case ( self , **_A ) -> Union[str, Any]:
'''simple docstring'''
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **_A )
def __snake_case ( self ) -> Tuple:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def __snake_case ( self ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = self.get_tokenizer()
_UpperCAmelCase : Optional[Any] = self.get_feature_extractor()
_UpperCAmelCase : List[str] = self.get_decoder()
_UpperCAmelCase : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
processor.save_pretrained(self.tmpdirname )
_UpperCAmelCase : Any = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , _A )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , _A )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , _A )
def __snake_case ( self ) -> int:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
_UpperCAmelCase : Tuple = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def __snake_case ( self ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase : Any = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(["""xx"""] )
with self.assertRaisesRegex(_A , """include""" ):
WavaVecaProcessorWithLM(
tokenizer=_A , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def __snake_case ( self ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : str = self.get_feature_extractor()
_UpperCAmelCase : Optional[Any] = self.get_tokenizer()
_UpperCAmelCase : List[Any] = self.get_decoder()
_UpperCAmelCase : Dict = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
_UpperCAmelCase : List[Any] = floats_list((3, 10_00) )
_UpperCAmelCase : str = feature_extractor(_A , return_tensors="""np""" )
_UpperCAmelCase : int = processor(_A , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __snake_case ( self ) -> int:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = self.get_feature_extractor()
_UpperCAmelCase : Any = self.get_tokenizer()
_UpperCAmelCase : Dict = self.get_decoder()
_UpperCAmelCase : Any = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
_UpperCAmelCase : Optional[Any] = """This is a test string"""
_UpperCAmelCase : Optional[Any] = processor(text=_A )
_UpperCAmelCase : Dict = tokenizer(_A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __snake_case ( self , _A=(2, 10, 16) , _A=77 ) -> Union[str, Any]:
'''simple docstring'''
np.random.seed(_A )
return np.random.rand(*_A )
def __snake_case ( self ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase : Tuple = self.get_feature_extractor()
_UpperCAmelCase : Tuple = self.get_tokenizer()
_UpperCAmelCase : Optional[Any] = self.get_decoder()
_UpperCAmelCase : Any = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
_UpperCAmelCase : str = self._get_dummy_logits(shape=(10, 16) , seed=13 )
_UpperCAmelCase : Optional[int] = processor.decode(_A )
_UpperCAmelCase : str = decoder.decode_beams(_A )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual("""</s> <s> </s>""" , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ["""fork"""], ["""spawn"""]] )
def __snake_case ( self , _A ) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Tuple = self.get_feature_extractor()
_UpperCAmelCase : str = self.get_tokenizer()
_UpperCAmelCase : Optional[Any] = self.get_decoder()
_UpperCAmelCase : str = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
_UpperCAmelCase : Union[str, Any] = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
_UpperCAmelCase : int = processor.batch_decode(_A )
else:
with get_context(_A ).Pool() as pool:
_UpperCAmelCase : Dict = processor.batch_decode(_A , _A )
_UpperCAmelCase : Tuple = list(_A )
with get_context("""fork""" ).Pool() as p:
_UpperCAmelCase : Tuple = decoder.decode_beams_batch(_A , _A )
        _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(_A , decoded_processor.text )
self.assertListEqual(["""<s> <s> </s>""", """<s> <s> <s>"""] , decoded_processor.text )
self.assertListEqual(_A , decoded_processor.logit_score )
self.assertListEqual(_A , decoded_processor.lm_score )
def __snake_case ( self ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = self.get_feature_extractor()
_UpperCAmelCase : int = self.get_tokenizer()
_UpperCAmelCase : Tuple = self.get_decoder()
_UpperCAmelCase : Optional[int] = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
_UpperCAmelCase : Optional[int] = self._get_dummy_logits()
_UpperCAmelCase : List[str] = 15
_UpperCAmelCase : Dict = -20.0
_UpperCAmelCase : List[str] = -4.0
_UpperCAmelCase : Any = processor.batch_decode(
_A , beam_width=_A , beam_prune_logp=_A , token_min_logp=_A , )
_UpperCAmelCase : Any = decoded_processor_out.text
_UpperCAmelCase : Any = list(_A )
with get_context("""fork""" ).Pool() as pool:
_UpperCAmelCase : str = decoder.decode_beams_batch(
_A , _A , beam_width=_A , beam_prune_logp=_A , token_min_logp=_A , )
_UpperCAmelCase : Optional[Any] = [d[0][0] for d in decoded_decoder_out]
_UpperCAmelCase : List[str] = [d[0][2] for d in decoded_decoder_out]
_UpperCAmelCase : Tuple = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(_A , _A )
self.assertListEqual(["""</s> <s> <s>""", """<s> <s> <s>"""] , _A )
self.assertTrue(np.array_equal(_A , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , _A , atol=1e-3 ) )
self.assertTrue(np.array_equal(_A , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9474] , _A , atol=1e-3 ) )
def __snake_case ( self ) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : int = self.get_feature_extractor()
_UpperCAmelCase : List[Any] = self.get_tokenizer()
_UpperCAmelCase : Union[str, Any] = self.get_decoder()
_UpperCAmelCase : List[Any] = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
_UpperCAmelCase : Optional[int] = self._get_dummy_logits()
_UpperCAmelCase : Any = 2.0
_UpperCAmelCase : Union[str, Any] = 5.0
_UpperCAmelCase : List[Any] = -20.0
_UpperCAmelCase : str = True
_UpperCAmelCase : Union[str, Any] = processor.batch_decode(
_A , alpha=_A , beta=_A , unk_score_offset=_A , lm_score_boundary=_A , )
_UpperCAmelCase : Tuple = decoded_processor_out.text
_UpperCAmelCase : Tuple = list(_A )
decoder.reset_params(
alpha=_A , beta=_A , unk_score_offset=_A , lm_score_boundary=_A , )
with get_context("""fork""" ).Pool() as pool:
_UpperCAmelCase : Optional[Any] = decoder.decode_beams_batch(
_A , _A , )
_UpperCAmelCase : Optional[int] = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(_A , _A )
self.assertListEqual(["""<s> </s> <s> </s> </s>""", """</s> </s> <s> </s> </s>"""] , _A )
_UpperCAmelCase : List[str] = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , _A )
def __snake_case ( self ) -> str:
'''simple docstring'''
_UpperCAmelCase : int = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
_UpperCAmelCase : Optional[Any] = processor.decoder.model_container[processor.decoder._model_key]
_UpperCAmelCase : Union[str, Any] = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
_UpperCAmelCase : Optional[Any] = os.listdir(_A )
_UpperCAmelCase : Optional[Any] = ["""alphabet.json""", """language_model"""]
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(_A , _A )
def __snake_case ( self ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = snapshot_download("""hf-internal-testing/processor_with_lm""" )
_UpperCAmelCase : Dict = WavaVecaProcessorWithLM.from_pretrained(_A )
_UpperCAmelCase : Any = processor.decoder.model_container[processor.decoder._model_key]
_UpperCAmelCase : Dict = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
_UpperCAmelCase : Optional[Any] = os.listdir(_A )
_UpperCAmelCase : Union[str, Any] = os.listdir(_A )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(_A , _A )
def __snake_case ( self ) -> Any:
'''simple docstring'''
_UpperCAmelCase : Dict = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
_UpperCAmelCase : str = AutoProcessor.from_pretrained("""hf-internal-testing/processor_with_lm""" )
_UpperCAmelCase : List[Any] = floats_list((3, 10_00) )
_UpperCAmelCase : str = processor_wavaveca(_A , return_tensors="""np""" )
_UpperCAmelCase : Any = processor_auto(_A , return_tensors="""np""" )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 )
_UpperCAmelCase : Union[str, Any] = self._get_dummy_logits()
_UpperCAmelCase : Dict = processor_wavaveca.batch_decode(_A )
_UpperCAmelCase : Optional[Any] = processor_auto.batch_decode(_A )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def __snake_case ( self ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = self.get_feature_extractor()
_UpperCAmelCase : Optional[int] = self.get_tokenizer()
_UpperCAmelCase : Union[str, Any] = self.get_decoder()
_UpperCAmelCase : List[Any] = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
@staticmethod
def __snake_case ( _A , _A ) -> List[Any]:
'''simple docstring'''
_UpperCAmelCase : Any = [d[key] for d in offsets]
return retrieved_list
def __snake_case ( self ) -> str:
'''simple docstring'''
_UpperCAmelCase : List[str] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
_UpperCAmelCase : List[Any] = self._get_dummy_logits()[0]
_UpperCAmelCase : Tuple = processor.decode(_A , output_word_offsets=_A )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(_A , _A ) )
self.assertEqual(""" """.join(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """end_offset""" ) , [1, 3, 5] )
def __snake_case ( self ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
_UpperCAmelCase : Optional[Any] = self._get_dummy_logits()
_UpperCAmelCase : List[Any] = processor.batch_decode(_A , output_word_offsets=_A )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(_A , _A ) )
self.assertListEqual(
[""" """.join(self.get_from_offsets(_A , """word""" ) ) for o in outputs["""word_offsets"""]] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """end_offset""" ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def __snake_case ( self ) -> str:
'''simple docstring'''
import torch
_UpperCAmelCase : List[str] = load_dataset("""common_voice""" , """en""" , split="""train""" , streaming=_A )
_UpperCAmelCase : List[Any] = ds.cast_column("""audio""" , datasets.Audio(sampling_rate=1_60_00 ) )
_UpperCAmelCase : List[Any] = iter(_A )
_UpperCAmelCase : Optional[Any] = next(_A )
_UpperCAmelCase : Optional[int] = AutoProcessor.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
_UpperCAmelCase : Dict = WavaVecaForCTC.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
_UpperCAmelCase : List[str] = processor(sample["""audio"""]["""array"""] , return_tensors="""pt""" ).input_values
with torch.no_grad():
_UpperCAmelCase : Optional[int] = model(_A ).logits.cpu().numpy()
_UpperCAmelCase : Union[str, Any] = processor.decode(logits[0] , output_word_offsets=_A )
_UpperCAmelCase : str = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
_UpperCAmelCase : Optional[int] = [
{
"""start_time""": d["""start_offset"""] * time_offset,
"""end_time""": d["""end_offset"""] * time_offset,
"""word""": d["""word"""],
}
for d in output["""word_offsets"""]
]
_UpperCAmelCase : List[Any] = """WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"""
# output words
self.assertEqual(""" """.join(self.get_from_offsets(_A , """word""" ) ) , _A )
self.assertEqual(""" """.join(self.get_from_offsets(_A , """word""" ) ) , output.text )
# output times
_UpperCAmelCase : List[str] = torch.tensor(self.get_from_offsets(_A , """start_time""" ) )
_UpperCAmelCase : Any = torch.tensor(self.get_from_offsets(_A , """end_time""" ) )
# fmt: off
_UpperCAmelCase : Optional[Any] = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
_UpperCAmelCase : List[Any] = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(_A , _A , atol=0.01 ) )
self.assertTrue(torch.allclose(_A , _A , atol=0.01 ) )
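    # --- Illustrative note (not part of the original source) ---
    # The multiprocessing tests above rely on one ordering detail: the pool
    # must be created *after* the processor, so that forked workers inherit
    # the loaded language model. A minimal usage sketch:
    #
    #   processor = WavaVecaProcessorWithLM.from_pretrained(
    #       "hf-internal-testing/processor_with_lm" )
    #   logits = ...  # (batch, time, vocab) numpy array from the acoustic model
    #   with get_context("fork").Pool() as pool:
    #       transcripts = processor.batch_decode(logits, pool).text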
| 246 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
class _UpperCamelCase :
'''simple docstring'''
    def __init__(self, keywords):
        self.adlist = []
        self.adlist.append(
            {"value": "", "next_states": [], "fail_state": 0, "output": []} )
        for keyword in keywords:
            self.add_keyword(keyword)
        self.set_fail_transitions()
    def find_next_state(self, current_state, char):
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None
    def add_keyword(self, keyword):
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    } )
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)
    def set_fail_transitions(self):
        q = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]["fail_state"]
                while (
                    self.find_next_state(state, self.adlist[child]["value"]) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state, self.adlist[child]["value"] )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )
    def search_in(self, string):
        result = {}  # returns a dict with keywords and list of its occurrences
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
            for key in self.adlist[current_state]["output"]:
                if key not in result:
                    result[key] = []
                result[key].append(i - len(key) + 1)
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
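# --- Illustrative usage (not part of the original source) ---
# The automaton above matches a fixed keyword set in one pass over the text:
#
#   auto = _UpperCamelCase(["he", "she", "his", "hers"])
#   auto.search_in("ushers")
#   # -> {'she': [1], 'he': [2], 'hers': [2]}   (keyword -> start indices)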
| 259 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A : Dict = logging.get_logger(__name__)
A : List[str] = {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"
),
"distilbert-base-uncased-finetuned-sst-2-english": (
"https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"
),
}
class _UpperCamelCase ( PretrainedConfig ):
'''simple docstring'''
__UpperCAmelCase : List[str] ="""distilbert"""
__UpperCAmelCase : int ={
"""hidden_size""": """dim""",
"""num_attention_heads""": """n_heads""",
"""num_hidden_layers""": """n_layers""",
}
def __init__( self , __a=3_05_22 , __a=5_12 , __a=False , __a=6 , __a=12 , __a=7_68 , __a=4 * 7_68 , __a=0.1 , __a=0.1 , __a="gelu" , __a=0.0_2 , __a=0.1 , __a=0.2 , __a=0 , **__a , ):
__lowerCAmelCase = vocab_size
__lowerCAmelCase = max_position_embeddings
__lowerCAmelCase = sinusoidal_pos_embds
__lowerCAmelCase = n_layers
__lowerCAmelCase = n_heads
__lowerCAmelCase = dim
__lowerCAmelCase = hidden_dim
__lowerCAmelCase = dropout
__lowerCAmelCase = attention_dropout
__lowerCAmelCase = activation
__lowerCAmelCase = initializer_range
__lowerCAmelCase = qa_dropout
__lowerCAmelCase = seq_classif_dropout
super().__init__(**__a , pad_token_id=__a )
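    # --- Illustrative note (not part of the original source) ---
    # The attribute_map declared above aliases the generic config names onto
    # DistilBERT's own fields: reading `config.hidden_size` returns
    # `config.dim`, `config.num_hidden_layers` returns `config.n_layers`, and
    # `config.num_attention_heads` returns `config.n_heads`, so code written
    # against the generic names works unchanged.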
class _UpperCamelCase ( OnnxConfig ):
'''simple docstring'''
@property
def snake_case ( self ):
if self.task == "multiple-choice":
__lowerCAmelCase = {0: "batch", 1: "choice", 2: "sequence"}
else:
__lowerCAmelCase = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
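# --- Illustrative note (not part of the original source) ---
# The OnnxConfig subclass above declares the dynamic axes used at export
# time: for most tasks both inputs are (batch, sequence), and for
# multiple-choice they are (batch, choice, sequence), so the exported graph
# accepts any batch size and sequence length.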
| 259 | 1 |
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class _lowercase ( ABC ):
'''simple docstring'''
@staticmethod
@abstractmethod
def a ( SCREAMING_SNAKE_CASE__ : ArgumentParser ) -> Tuple:
raise NotImplementedError()
@abstractmethod
def a ( self : int ) -> Union[str, Any]:
raise NotImplementedError()
| 229 |
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def UpperCamelCase_ ( snake_case_ : Any ) -> Optional[Any]:
'''simple docstring'''
__lowerCAmelCase = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""decoder.output_projection.weight""",
"""_float_tensor""",
"""encoder.embed_positions._float_tensor""",
"""decoder.embed_positions._float_tensor""",
]
for k in ignore_keys:
state_dict.pop(snake_case_ , snake_case_ )
def UpperCamelCase_ ( snake_case_ : Optional[Any] ) -> List[Any]:
'''simple docstring'''
__lowerCAmelCase , __lowerCAmelCase = emb.weight.shape
__lowerCAmelCase = nn.Linear(snake_case_ , snake_case_ , bias=snake_case_ )
__lowerCAmelCase = emb.weight.data
return lin_layer
def UpperCamelCase_ ( snake_case_ : Any ) -> Any:
'''simple docstring'''
__lowerCAmelCase = torch.load(snake_case_ , map_location="""cpu""" )
__lowerCAmelCase = mam_aaa["""args"""] or mam_aaa["""cfg"""]["""model"""]
__lowerCAmelCase = mam_aaa["""model"""]
remove_ignore_keys_(snake_case_ )
__lowerCAmelCase = state_dict["""encoder.embed_tokens.weight"""].shape[0]
__lowerCAmelCase = MaMaaaConfig(
vocab_size=snake_case_ , max_position_embeddings=10_24 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="""relu""" , )
__lowerCAmelCase = state_dict["""decoder.embed_tokens.weight"""]
__lowerCAmelCase = MaMaaaForConditionalGeneration(snake_case_ )
model.model.load_state_dict(snake_case_ , strict=snake_case_ )
__lowerCAmelCase = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
_A : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
_A : str = parser.parse_args()
    _A : Optional[int] = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
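    # --- Illustrative usage (not part of the original source) ---
    # Sketch of invoking the conversion script above from a shell (the
    # script filename is hypothetical):
    #
    #   python convert_m2m100_checkpoint.py /path/to/model.pt ./m2m100-converted
    #
    # The first positional argument is the fairseq checkpoint, the second
    # the output directory passed to save_pretrained.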
| 229 | 1 |
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {
'''kakaobrain/align-base''': '''https://huggingface.co/kakaobrain/align-base/resolve/main/config.json''',
}
class AlignTextConfig ( PretrainedConfig ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = """align_text_model"""
def __init__(self , __a=30522 , __a=768 , __a=12 , __a=12 , __a=3072 , __a="gelu" , __a=0.1 , __a=0.1 , __a=512 , __a=2 , __a=0.02 , __a=1E-1_2 , __a=0 , __a="absolute" , __a=True , **__a , ) -> Tuple:
"""simple docstring"""
super().__init__(**__a )
UpperCAmelCase__ = vocab_size
UpperCAmelCase__ = hidden_size
UpperCAmelCase__ = num_hidden_layers
UpperCAmelCase__ = num_attention_heads
UpperCAmelCase__ = hidden_act
UpperCAmelCase__ = intermediate_size
UpperCAmelCase__ = hidden_dropout_prob
UpperCAmelCase__ = attention_probs_dropout_prob
UpperCAmelCase__ = max_position_embeddings
UpperCAmelCase__ = type_vocab_size
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = layer_norm_eps
UpperCAmelCase__ = position_embedding_type
UpperCAmelCase__ = use_cache
UpperCAmelCase__ = pad_token_id
@classmethod
def UpperCamelCase__ (cls , __a , **__a ) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(__a )
UpperCAmelCase__ , UpperCAmelCase__ = cls.get_config_dict(__a , **__a )
# get the text config dict if we are loading from AlignConfig
if config_dict.get('model_type' ) == "align":
UpperCAmelCase__ = config_dict['text_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(__a , **__a )
class AlignVisionConfig ( PretrainedConfig ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = """align_vision_model"""
def __init__(self , __a = 3 , __a = 600 , __a = 2.0 , __a = 3.1 , __a = 8 , __a = [3, 3, 5, 3, 5, 5, 3] , __a = [32, 16, 24, 40, 80, 112, 192] , __a = [16, 24, 40, 80, 112, 192, 320] , __a = [] , __a = [1, 2, 2, 2, 1, 2, 1] , __a = [1, 2, 2, 3, 3, 4, 1] , __a = [1, 6, 6, 6, 6, 6, 6] , __a = 0.25 , __a = "swish" , __a = 2560 , __a = "mean" , __a = 0.02 , __a = 0.0_01 , __a = 0.99 , __a = 0.2 , **__a , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(**__a )
UpperCAmelCase__ = num_channels
UpperCAmelCase__ = image_size
UpperCAmelCase__ = width_coefficient
UpperCAmelCase__ = depth_coefficient
UpperCAmelCase__ = depth_divisor
UpperCAmelCase__ = kernel_sizes
UpperCAmelCase__ = in_channels
UpperCAmelCase__ = out_channels
UpperCAmelCase__ = depthwise_padding
UpperCAmelCase__ = strides
UpperCAmelCase__ = num_block_repeats
UpperCAmelCase__ = expand_ratios
UpperCAmelCase__ = squeeze_expansion_ratio
UpperCAmelCase__ = hidden_act
UpperCAmelCase__ = hidden_dim
UpperCAmelCase__ = pooling_type
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = batch_norm_eps
UpperCAmelCase__ = batch_norm_momentum
UpperCAmelCase__ = drop_connect_rate
UpperCAmelCase__ = sum(__a ) * 4
@classmethod
def UpperCamelCase__ (cls , __a , **__a ) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(__a )
UpperCAmelCase__ , UpperCAmelCase__ = cls.get_config_dict(__a , **__a )
# get the vision config dict if we are loading from AlignConfig
if config_dict.get('model_type' ) == "align":
UpperCAmelCase__ = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(__a , **__a )
class AlignConfig ( PretrainedConfig ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = """align"""
__SCREAMING_SNAKE_CASE = True
def __init__(self , __a=None , __a=None , __a=640 , __a=1.0 , __a=0.02 , **__a , ) -> Optional[int]:
"""simple docstring"""
super().__init__(**__a )
if text_config is None:
UpperCAmelCase__ = {}
logger.info('text_config is None. Initializing the AlignTextConfig with default values.' )
if vision_config is None:
UpperCAmelCase__ = {}
logger.info('vision_config is None. Initializing the AlignVisionConfig with default values.' )
UpperCAmelCase__ = AlignTextConfig(**__a )
UpperCAmelCase__ = AlignVisionConfig(**__a )
UpperCAmelCase__ = projection_dim
UpperCAmelCase__ = temperature_init_value
UpperCAmelCase__ = initializer_range
@classmethod
def UpperCamelCase__ (cls , __a , __a , **__a ) -> int:
"""simple docstring"""
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__a )
def UpperCamelCase__ (self ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = copy.deepcopy(self.__dict__ )
UpperCAmelCase__ = self.text_config.to_dict()
UpperCAmelCase__ = self.vision_config.to_dict()
UpperCAmelCase__ = self.__class__.model_type
return output
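# --- Illustrative usage (not part of the original source) ---
# Sketch of composing the three configs above, assuming the upstream ALIGN
# keyword names `text_config` / `vision_config`:
#
#   text_cfg = AlignTextConfig(vocab_size=30522, hidden_size=768)
#   vision_cfg = AlignVisionConfig(image_size=600)
#   cfg = AlignConfig(text_config=text_cfg.to_dict(), vision_config=vision_cfg.to_dict())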
| 335 |
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
_UpperCamelCase = logging.get_logger(__name__)
class lowercase ( MobileViTImageProcessor ):
'''simple docstring'''
def __init__(self , *__a , **__a ) -> None:
"""simple docstring"""
warnings.warn(
'The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use MobileViTImageProcessor instead.' , __a , )
super().__init__(*__a , **__a )
| 335 | 1 |
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
lowercase_ : Optional[Any] = 'src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
lowercase_ : Union[str, Any] = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
lowercase_ : Optional[int] = re.compile(r'TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
lowercase_ : Dict = re.compile(r'Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Will match any TF or Flax model too, so it needs to be in an else branch after the two previous regexes.
lowercase_ : List[Any] = re.compile(r'(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
lowercase_ : Union[str, Any] = [
('pretraining', 'MODEL_FOR_PRETRAINING_MAPPING_NAMES', 'AutoModelForPreTraining'),
('feature-extraction', 'MODEL_MAPPING_NAMES', 'AutoModel'),
('audio-classification', 'MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForAudioClassification'),
('text-generation', 'MODEL_FOR_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForCausalLM'),
('automatic-speech-recognition', 'MODEL_FOR_CTC_MAPPING_NAMES', 'AutoModelForCTC'),
('image-classification', 'MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForImageClassification'),
('image-segmentation', 'MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES', 'AutoModelForImageSegmentation'),
('fill-mask', 'MODEL_FOR_MASKED_LM_MAPPING_NAMES', 'AutoModelForMaskedLM'),
('object-detection', 'MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES', 'AutoModelForObjectDetection'),
(
'zero-shot-object-detection',
'MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES',
'AutoModelForZeroShotObjectDetection',
),
('question-answering', 'MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES', 'AutoModelForQuestionAnswering'),
('text2text-generation', 'MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForSeq2SeqLM'),
('text-classification', 'MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForSequenceClassification'),
('automatic-speech-recognition', 'MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES', 'AutoModelForSpeechSeq2Seq'),
(
'table-question-answering',
'MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForTableQuestionAnswering',
),
('token-classification', 'MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForTokenClassification'),
('multiple-choice', 'MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES', 'AutoModelForMultipleChoice'),
(
'next-sentence-prediction',
'MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES',
'AutoModelForNextSentencePrediction',
),
(
'audio-frame-classification',
'MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForAudioFrameClassification',
),
('audio-xvector', 'MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES', 'AutoModelForAudioXVector'),
(
'document-question-answering',
'MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForDocumentQuestionAnswering',
),
(
'visual-question-answering',
'MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForVisualQuestionAnswering',
),
    ('image-to-text', 'MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES', 'AutoModelForVision2Seq'),
(
'zero-shot-image-classification',
'MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForZeroShotImageClassification',
),
('depth-estimation', 'MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES', 'AutoModelForDepthEstimation'),
('video-classification', 'MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForVideoClassification'),
('mask-generation', 'MODEL_FOR_MASK_GENERATION_MAPPING_NAMES', 'AutoModelForMaskGeneration'),
]
def __SCREAMING_SNAKE_CASE ( snake_case_ ):
'''simple docstring'''
_UpperCAmelCase = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)" , snake_case_ )
return [m.group(0 ) for m in matches]
def __SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
_UpperCAmelCase = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
_UpperCAmelCase = {
config.replace("Config" , "" ): model_type for model_type, config in config_maping_names.items()
}
# Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
_UpperCAmelCase = collections.defaultdict(snake_case_ )
_UpperCAmelCase = collections.defaultdict(snake_case_ )
_UpperCAmelCase = collections.defaultdict(snake_case_ )
# Let's lookup through all transformers object (once) and find if models are supported by a given backend.
for attr_name in dir(snake_case_ ):
_UpperCAmelCase = None
if _re_tf_models.match(snake_case_ ) is not None:
_UpperCAmelCase = tf_models
_UpperCAmelCase = _re_tf_models.match(snake_case_ ).groups()[0]
elif _re_flax_models.match(snake_case_ ) is not None:
_UpperCAmelCase = flax_models
_UpperCAmelCase = _re_flax_models.match(snake_case_ ).groups()[0]
elif _re_pt_models.match(snake_case_ ) is not None:
_UpperCAmelCase = pt_models
_UpperCAmelCase = _re_pt_models.match(snake_case_ ).groups()[0]
if lookup_dict is not None:
while len(snake_case_ ) > 0:
if attr_name in model_prefix_to_model_type:
_UpperCAmelCase = True
break
# Try again after removing the last word in the name
_UpperCAmelCase = "".join(camel_case_split(snake_case_ )[:-1] )
_UpperCAmelCase = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) )
_UpperCAmelCase = list(snake_case_ )
all_models.sort()
_UpperCAmelCase = {"model_type": all_models}
_UpperCAmelCase = [pt_models[t] for t in all_models]
_UpperCAmelCase = [tf_models[t] for t in all_models]
_UpperCAmelCase = [flax_models[t] for t in all_models]
# Now let's use the auto-mapping names to make sure
_UpperCAmelCase = {}
for t in all_models:
if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
_UpperCAmelCase = "AutoProcessor"
elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
_UpperCAmelCase = "AutoTokenizer"
elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
_UpperCAmelCase = "AutoFeatureExtractor"
else:
# Default to AutoTokenizer if a model has nothing, for backward compatibility.
_UpperCAmelCase = "AutoTokenizer"
_UpperCAmelCase = [processors[t] for t in all_models]
return pd.DataFrame(snake_case_ )
def __SCREAMING_SNAKE_CASE ( snake_case_ ):
'''simple docstring'''
_UpperCAmelCase = [
transformers_module.models.auto.modeling_auto,
transformers_module.models.auto.modeling_tf_auto,
transformers_module.models.auto.modeling_flax_auto,
]
for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
_UpperCAmelCase = [model_mapping, f"""TF_{model_mapping}""", f"""FLAX_{model_mapping}"""]
_UpperCAmelCase = [auto_class, f"""TF_{auto_class}""", f"""Flax_{auto_class}"""]
# Loop through all three frameworks
for module, cls, mapping in zip(snake_case_ , snake_case_ , snake_case_ ):
# The type of pipeline may not exist in this framework
if not hasattr(snake_case_ , snake_case_ ):
continue
# First extract all model_names
_UpperCAmelCase = []
for name in getattr(snake_case_ , snake_case_ ).values():
if isinstance(snake_case_ , snake_case_ ):
model_names.append(snake_case_ )
else:
model_names.extend(list(snake_case_ ) )
# Add pipeline tag and auto model class for those models
table.update({model_name: (pipeline_tag, cls) for model_name in model_names} )
return table
def __SCREAMING_SNAKE_CASE ( snake_case_ , snake_case_ ):
'''simple docstring'''
_UpperCAmelCase = get_frameworks_table()
_UpperCAmelCase = Dataset.from_pandas(snake_case_ )
_UpperCAmelCase = hf_hub_download(
"huggingface/transformers-metadata" , "pipeline_tags.json" , repo_type="dataset" , token=snake_case_ )
_UpperCAmelCase = Dataset.from_json(snake_case_ )
_UpperCAmelCase = {
tags_dataset[i]["model_class"]: (tags_dataset[i]["pipeline_tag"], tags_dataset[i]["auto_class"])
for i in range(len(snake_case_ ) )
}
_UpperCAmelCase = update_pipeline_and_auto_class_table(snake_case_ )
# Sort the model classes to avoid some nondeterministic updates to create false update commits.
_UpperCAmelCase = sorted(table.keys() )
_UpperCAmelCase = pd.DataFrame(
{
"model_class": model_classes,
"pipeline_tag": [table[m][0] for m in model_classes],
"auto_class": [table[m][1] for m in model_classes],
} )
_UpperCAmelCase = Dataset.from_pandas(snake_case_ )
with tempfile.TemporaryDirectory() as tmp_dir:
frameworks_dataset.to_json(os.path.join(snake_case_ , "frameworks.json" ) )
tags_dataset.to_json(os.path.join(snake_case_ , "pipeline_tags.json" ) )
if commit_sha is not None:
_UpperCAmelCase = (
f"""Update with commit {commit_sha}\n\nSee: """
f"""https://github.com/huggingface/transformers/commit/{commit_sha}"""
)
else:
_UpperCAmelCase = "Update"
upload_folder(
repo_id="huggingface/transformers-metadata" , folder_path=snake_case_ , repo_type="dataset" , token=snake_case_ , commit_message=snake_case_ , )
def __SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
_UpperCAmelCase = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
_UpperCAmelCase = transformers_module.pipelines.SUPPORTED_TASKS
_UpperCAmelCase = []
for key in pipeline_tasks:
if key not in in_table:
_UpperCAmelCase = pipeline_tasks[key]["pt"]
if isinstance(snake_case_ , (list, tuple) ):
_UpperCAmelCase = model[0]
_UpperCAmelCase = model.__name__
if model not in in_table.values():
missing.append(snake_case_ )
if len(snake_case_ ) > 0:
_UpperCAmelCase = ", ".join(snake_case_ )
raise ValueError(
"The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside "
f"""`utils/update_metadata.py`: {msg}. Please add them!""" )
if __name__ == "__main__":
lowercase_ : Tuple = argparse.ArgumentParser()
parser.add_argument('--token', type=str, help='The token to use to push to the transformers-metadata dataset.')
parser.add_argument('--commit_sha', type=str, help='The sha of the commit going with this update.')
parser.add_argument('--check-only', action='store_true', help='Activate to just check all pipelines are present.')
lowercase_ : List[str] = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
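        # --- Illustrative usage (not part of the original source) ---
        # Run from the repository root, matching the argparse flags above:
        #
        #   python utils/update_metadata.py --token <hub_token> --commit_sha <sha>
        #   python utils/update_metadata.py --check-only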
| 133 |
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def __SCREAMING_SNAKE_CASE ( snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_=True , snake_case_="pt" ):
'''simple docstring'''
_UpperCAmelCase = {"add_prefix_space": True} if isinstance(snake_case_ , snake_case_ ) and not line.startswith(" " ) else {}
_UpperCAmelCase = padding_side
return tokenizer(
[line] , max_length=snake_case_ , padding="max_length" if pad_to_max_length else None , truncation=snake_case_ , return_tensors=snake_case_ , add_special_tokens=snake_case_ , **snake_case_ , )
def __SCREAMING_SNAKE_CASE ( snake_case_ , snake_case_ , snake_case_=None , ):
'''simple docstring'''
_UpperCAmelCase = input_ids.ne(snake_case_ ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
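# --- Illustrative note (not part of the original source) ---
# Assuming the helper above corresponds to `trim_batch`, it drops the
# columns that are padding in *every* row of the batch. With pad_token_id=0:
#
#   input_ids = [[5, 6, 0, 0],
#                [7, 0, 0, 0]]
#   # column mask: [True, True, False, False] -> the last two columns are cut.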
class __lowerCAmelCase ( Dataset ):
def __init__( self : Dict , snake_case__ : List[Any] , snake_case__ : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : List[str]="train" , snake_case__ : List[str]=None , snake_case__ : Optional[Any]=None , snake_case__ : int=None , snake_case__ : List[str]="" , ):
"""simple docstring"""
super().__init__()
_UpperCAmelCase = Path(snake_case__ ).joinpath(type_path + ".source" )
_UpperCAmelCase = Path(snake_case__ ).joinpath(type_path + ".target" )
_UpperCAmelCase = self.get_char_lens(self.src_file )
_UpperCAmelCase = max_source_length
_UpperCAmelCase = max_target_length
assert min(self.src_lens ) > 0, F"""found empty line in {self.src_file}"""
_UpperCAmelCase = tokenizer
_UpperCAmelCase = prefix
if n_obs is not None:
_UpperCAmelCase = self.src_lens[:n_obs]
_UpperCAmelCase = src_lang
_UpperCAmelCase = tgt_lang
def __len__( self : Optional[int] ):
"""simple docstring"""
return len(self.src_lens )
def __getitem__( self : Optional[Any] , snake_case__ : str ):
"""simple docstring"""
_UpperCAmelCase = index + 1 # linecache starts at 1
_UpperCAmelCase = self.prefix + linecache.getline(str(self.src_file ) , snake_case__ ).rstrip("\n" )
_UpperCAmelCase = linecache.getline(str(self.tgt_file ) , snake_case__ ).rstrip("\n" )
assert source_line, F"""empty source line for index {index}"""
assert tgt_line, F"""empty tgt line for index {index}"""
# Need to add eos token manually for T5
if isinstance(self.tokenizer , snake_case__ ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
_UpperCAmelCase = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , snake_case__ ) else self.tokenizer
)
_UpperCAmelCase = self.tokenizer.generator if isinstance(self.tokenizer , snake_case__ ) else self.tokenizer
_UpperCAmelCase = encode_line(snake_case__ , snake_case__ , self.max_source_length , "right" )
_UpperCAmelCase = encode_line(snake_case__ , snake_case__ , self.max_target_length , "right" )
_UpperCAmelCase = source_inputs["input_ids"].squeeze()
_UpperCAmelCase = target_inputs["input_ids"].squeeze()
_UpperCAmelCase = source_inputs["attention_mask"].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def UpperCamelCase ( snake_case__ : Optional[Any] ):
"""simple docstring"""
return [len(snake_case__ ) for x in Path(snake_case__ ).open().readlines()]
def UpperCamelCase ( self : Any , snake_case__ : List[Any] ):
"""simple docstring"""
_UpperCAmelCase = torch.stack([x["input_ids"] for x in batch] )
_UpperCAmelCase = torch.stack([x["attention_mask"] for x in batch] )
_UpperCAmelCase = torch.stack([x["decoder_input_ids"] for x in batch] )
_UpperCAmelCase = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , snake_case__ )
else self.tokenizer.pad_token_id
)
_UpperCAmelCase = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , snake_case__ )
else self.tokenizer.pad_token_id
)
_UpperCAmelCase = trim_batch(snake_case__ , snake_case__ )
_UpperCAmelCase , _UpperCAmelCase = trim_batch(snake_case__ , snake_case__ , attention_mask=snake_case__ )
_UpperCAmelCase = {
"input_ids": source_ids,
"attention_mask": source_mask,
"decoder_input_ids": y,
}
return batch
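# Hedged note on the collate step above: per-example tensors are stacked and
# all-padding columns are trimmed with the helper defined earlier, so each
# batch is exactly as wide as its longest source/target sequence.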
lowercase_ : Dict = getLogger(__name__)
def __SCREAMING_SNAKE_CASE ( snake_case_ ):
'''simple docstring'''
return list(itertools.chain.from_iterable(snake_case_ ) )
def __SCREAMING_SNAKE_CASE ( snake_case_ ):
'''simple docstring'''
_UpperCAmelCase = get_git_info()
save_json(snake_case_ , os.path.join(snake_case_ , "git_log.json" ) )
def __SCREAMING_SNAKE_CASE ( snake_case_ , snake_case_ , snake_case_=4 , **snake_case_ ):
'''simple docstring'''
with open(snake_case_ , "w" ) as f:
json.dump(snake_case_ , snake_case_ , indent=snake_case_ , **snake_case_ )
def __SCREAMING_SNAKE_CASE ( snake_case_ ):
'''simple docstring'''
with open(snake_case_ ) as f:
return json.load(snake_case_ )
def __SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
    repo = git.Repo(search_parent_directories=True )
    repo_infos = {
        "repo_id": str(repo ),
        "repo_sha": str(repo.head.object.hexsha ),
        "repo_branch": str(repo.active_branch ),
        "hostname": str(socket.gethostname() ),
    }
    return repo_infos
def __SCREAMING_SNAKE_CASE ( snake_case_ , snake_case_ ):
'''simple docstring'''
return list(map(snake_case_ , snake_case_ ) )
def __SCREAMING_SNAKE_CASE ( snake_case_ , snake_case_ ):
'''simple docstring'''
with open(snake_case_ , "wb" ) as f:
return pickle.dump(snake_case_ , snake_case_ )
def __SCREAMING_SNAKE_CASE ( snake_case_ ):
'''simple docstring'''
    def remove_articles(text ):
        return re.sub(R"\b(a|an|the)\b" , " " , text )
    def white_space_fix(text ):
        return " ".join(text.split() )
    def remove_punc(text ):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )
    def lower(text ):
        return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(snake_case_ ) ) ) )
def __SCREAMING_SNAKE_CASE ( snake_case_ , snake_case_ ):
    '''simple docstring'''
    prediction_tokens = normalize_answer(snake_case_ ).split()
    ground_truth_tokens = normalize_answer(snake_case_ ).split()
    common = Counter(prediction_tokens ) & Counter(ground_truth_tokens )
    num_same = sum(common.values() )
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens )
    recall = 1.0 * num_same / len(ground_truth_tokens )
    fa = (2 * precision * recall) / (precision + recall)
    return fa
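# Worked example for the token-overlap F1 above (hedged; `normalize_answer`
# is the lowercasing/punctuation/article-stripping helper defined earlier):
# prediction "The cat sat" vs ground truth "a cat sat down" share the tokens
# {cat, sat}, so precision = 2/3, recall = 2/4, and
# F1 = 2 * (2/3) * (1/2) / (2/3 + 1/2) ≈ 0.571.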
def __SCREAMING_SNAKE_CASE ( snake_case_ , snake_case_ ):
'''simple docstring'''
return normalize_answer(snake_case_ ) == normalize_answer(snake_case_ )
def __SCREAMING_SNAKE_CASE ( snake_case_ , snake_case_ ):
'''simple docstring'''
assert len(snake_case_ ) == len(snake_case_ )
    em = 0
    for hypo, pred in zip(snake_case_ , snake_case_ ):
        em += exact_match_score(hypo , pred )
if len(snake_case_ ) > 0:
em /= len(snake_case_ )
return {"em": em}
def __SCREAMING_SNAKE_CASE ( snake_case_ ):
'''simple docstring'''
    return snake_case_.startswith("rag" )
def __SCREAMING_SNAKE_CASE ( snake_case_ , snake_case_ , snake_case_ ):
'''simple docstring'''
_UpperCAmelCase = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
_UpperCAmelCase = "dropout_rate"
for p in extra_params:
if getattr(snake_case_ , snake_case_ , snake_case_ ):
if not hasattr(snake_case_ , snake_case_ ) and not hasattr(snake_case_ , equivalent_param[p] ):
logger.info("config doesn't have a `{}` attribute".format(snake_case_ ) )
delattr(snake_case_ , snake_case_ )
continue
_UpperCAmelCase = p if hasattr(snake_case_ , snake_case_ ) else equivalent_param[p]
setattr(snake_case_ , snake_case_ , getattr(snake_case_ , snake_case_ ) )
delattr(snake_case_ , snake_case_ )
return hparams, config
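# Hedged sketch of the hyperparameter remapping above: T5-style configs name
# their dropout knob `dropout_rate`, so a `dropout` override is rerouted to
# the equivalent config attribute. `remap_extra_params` and the
# SimpleNamespace stand-ins below are illustrative assumptions, not the
# original objects.
from types import SimpleNamespace

def remap_extra_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout`; they expose `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None) is None:
            continue
        target = p if hasattr(config, p) else equivalent_param[p]
        if hasattr(config, target):
            setattr(config, target, getattr(hparams, p))
        delattr(hparams, p)
    return hparams, config

hp, cfg = remap_extra_params(["dropout"], SimpleNamespace(dropout=0.1), SimpleNamespace(dropout_rate=0.0))
print(cfg.dropout_rate)  # 0.1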
| 133 | 1 |
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
__UpperCamelCase : Dict = logging.get_logger(__name__)
def A ( _lowercase , _lowercase , _lowercase ):
SCREAMING_SNAKE_CASE : Optional[int] = UniSpeechSatForSequenceClassification.from_pretrained(_lowercase , config=_lowercase )
SCREAMING_SNAKE_CASE : Any = downstream_dict['''projector.weight''']
SCREAMING_SNAKE_CASE : Optional[int] = downstream_dict['''projector.bias''']
SCREAMING_SNAKE_CASE : Optional[Any] = downstream_dict['''model.post_net.linear.weight''']
SCREAMING_SNAKE_CASE : int = downstream_dict['''model.post_net.linear.bias''']
return model
def A ( _lowercase , _lowercase , _lowercase ):
SCREAMING_SNAKE_CASE : Optional[int] = UniSpeechSatForAudioFrameClassification.from_pretrained(_lowercase , config=_lowercase )
SCREAMING_SNAKE_CASE : Union[str, Any] = downstream_dict['''model.linear.weight''']
SCREAMING_SNAKE_CASE : str = downstream_dict['''model.linear.bias''']
return model
def A ( _lowercase , _lowercase , _lowercase ):
SCREAMING_SNAKE_CASE : str = UniSpeechSatForXVector.from_pretrained(_lowercase , config=_lowercase )
SCREAMING_SNAKE_CASE : str = downstream_dict['''connector.weight''']
SCREAMING_SNAKE_CASE : Dict = downstream_dict['''connector.bias''']
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
SCREAMING_SNAKE_CASE : Optional[Any] = downstream_dict[
f"""model.framelevel_feature_extractor.module.{i}.kernel.weight"""
]
SCREAMING_SNAKE_CASE : List[str] = downstream_dict[f"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""]
SCREAMING_SNAKE_CASE : int = downstream_dict['''model.utterancelevel_feature_extractor.linear1.weight''']
SCREAMING_SNAKE_CASE : Any = downstream_dict['''model.utterancelevel_feature_extractor.linear1.bias''']
SCREAMING_SNAKE_CASE : Tuple = downstream_dict['''model.utterancelevel_feature_extractor.linear2.weight''']
SCREAMING_SNAKE_CASE : List[str] = downstream_dict['''model.utterancelevel_feature_extractor.linear2.bias''']
SCREAMING_SNAKE_CASE : Any = downstream_dict['''objective.W''']
return model
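# Hedged note on the three converters above: each instantiates the matching
# UniSpeechSat head from the base config and copies the S3PRL downstream
# weights (projector, linear head, TDNN kernels, objective matrix) into the
# corresponding model attributes tensor by tensor.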
@torch.no_grad()
def A ( _lowercase , _lowercase , _lowercase , _lowercase ):
SCREAMING_SNAKE_CASE : List[Any] = torch.load(_lowercase , map_location='''cpu''' )
SCREAMING_SNAKE_CASE : Any = checkpoint['''Downstream''']
SCREAMING_SNAKE_CASE : List[Any] = UniSpeechSatConfig.from_pretrained(_lowercase )
SCREAMING_SNAKE_CASE : int = WavaVecaFeatureExtractor.from_pretrained(
_lowercase , return_attention_mask=_lowercase , do_normalize=_lowercase )
SCREAMING_SNAKE_CASE : Tuple = hf_config.architectures[0]
if arch.endswith('''ForSequenceClassification''' ):
SCREAMING_SNAKE_CASE : str = convert_classification(_lowercase , _lowercase , _lowercase )
elif arch.endswith('''ForAudioFrameClassification''' ):
SCREAMING_SNAKE_CASE : List[Any] = convert_diarization(_lowercase , _lowercase , _lowercase )
elif arch.endswith('''ForXVector''' ):
SCREAMING_SNAKE_CASE : int = convert_xvector(_lowercase , _lowercase , _lowercase )
else:
raise NotImplementedError(f"""S3PRL weights conversion is not supported for {arch}""" )
if hf_config.use_weighted_layer_sum:
SCREAMING_SNAKE_CASE : int = checkpoint['''Featurizer''']['''weights''']
hf_feature_extractor.save_pretrained(_lowercase )
hf_model.save_pretrained(_lowercase )
if __name__ == "__main__":
__UpperCamelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
__UpperCamelCase : Union[str, Any] = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 258 |
from __future__ import annotations
from math import pi
def A ( _lowercase , _lowercase , _lowercase ):
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if inductance < 0:
raise ValueError('''Inductance cannot be negative''' )
if frequency < 0:
raise ValueError('''Frequency cannot be negative''' )
if reactance < 0:
raise ValueError('''Inductive reactance cannot be negative''' )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
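# Worked example for the solver above (its def name is obfuscated as `A`):
# it rearranges X_L = 2*pi*f*L for whichever quantity is passed as 0, so
# 100 Hz through a 35 mH inductor gives 2 * pi * 100 * 35e-3 ≈ 21.99 ohms
# of inductive reactance.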
| 258 | 1 |
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowerCamelCase : Tuple = logging.get_logger(__name__)
_lowerCamelCase : Optional[Any] = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
}
_lowerCamelCase : Union[str, Any] = {
"""vocab_file""": {"""ctrl""": """https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"""},
"""merges_file""": {"""ctrl""": """https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"""},
}
_lowerCamelCase : Optional[Any] = {
"""ctrl""": 256,
}
_lowerCamelCase : Tuple = {
"""Pregnancy""": 168629,
"""Christianity""": 7675,
"""Explain""": 106423,
"""Fitness""": 63440,
"""Saving""": 63163,
"""Ask""": 27171,
"""Ass""": 95985,
"""Joke""": 163509,
"""Questions""": 45622,
"""Thoughts""": 49605,
"""Retail""": 52342,
"""Feminism""": 164338,
"""Writing""": 11992,
"""Atheism""": 192263,
"""Netflix""": 48616,
"""Computing""": 39639,
"""Opinion""": 43213,
"""Alone""": 44967,
"""Funny""": 58917,
"""Gaming""": 40358,
"""Human""": 4088,
"""India""": 1331,
"""Joker""": 77138,
"""Diet""": 36206,
"""Legal""": 11859,
"""Norman""": 4939,
"""Tip""": 72689,
"""Weight""": 52343,
"""Movies""": 46273,
"""Running""": 23425,
"""Science""": 2090,
"""Horror""": 37793,
"""Confession""": 60572,
"""Finance""": 12250,
"""Politics""": 16360,
"""Scary""": 191985,
"""Support""": 12654,
"""Technologies""": 32516,
"""Teenage""": 66160,
"""Event""": 32769,
"""Learned""": 67460,
"""Notion""": 182770,
"""Wikipedia""": 37583,
"""Books""": 6665,
"""Extract""": 76050,
"""Confessions""": 102701,
"""Conspiracy""": 75932,
"""Links""": 63674,
"""Narcissus""": 150425,
"""Relationship""": 54766,
"""Relationships""": 134796,
"""Reviews""": 41671,
"""News""": 4256,
"""Translation""": 26820,
"""multilingual""": 128406,
}
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> List[str]:
"""simple docstring"""
A__ = set()
A__ = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
A__ = char
A__ = set(lowercase_ )
return pairs
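# Self-contained restatement of the pair extraction above (its def name is
# obfuscated; `get_pairs` is the name the BPE loop below refers to): every
# adjacent symbol pair of a word is collected for ranking against bpe_ranks.
def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs

print(get_pairs(("h", "e", "l", "l", "o</w>")))
# {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o</w>')} (set order may vary)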
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = VOCAB_FILES_NAMES
UpperCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ = CONTROL_CODES
def __init__( self : str , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Union[str, Any]="<unk>" , **UpperCAmelCase__ : List[Any]) ->List[str]:
'''simple docstring'''
super().__init__(unk_token=UpperCAmelCase__ , **UpperCAmelCase__)
with open(UpperCAmelCase__ , encoding='''utf-8''') as vocab_handle:
A__ = json.load(UpperCAmelCase__)
A__ = {v: k for k, v in self.encoder.items()}
with open(UpperCAmelCase__ , encoding='''utf-8''') as merges_handle:
A__ = merges_handle.read().split('''\n''')[1:-1]
A__ = [tuple(merge.split()) for merge in merges]
A__ = dict(zip(UpperCAmelCase__ , range(len(UpperCAmelCase__))))
A__ = {}
@property
def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Union[str, Any]:
'''simple docstring'''
return len(self.encoder)
def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Dict:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder)
def SCREAMING_SNAKE_CASE ( self : str , UpperCAmelCase__ : List[Any]) ->Optional[Any]:
'''simple docstring'''
if token in self.cache:
return self.cache[token]
A__ = tuple(UpperCAmelCase__)
A__ = tuple(list(word[:-1]) + [word[-1] + '''</w>'''])
A__ = get_pairs(UpperCAmelCase__)
if not pairs:
return token
while True:
A__ = min(UpperCAmelCase__ , key=lambda UpperCAmelCase__: self.bpe_ranks.get(UpperCAmelCase__ , float('''inf''')))
if bigram not in self.bpe_ranks:
break
A__ , A__ = bigram
A__ = []
A__ = 0
while i < len(UpperCAmelCase__):
try:
A__ = word.index(UpperCAmelCase__ , UpperCAmelCase__)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
A__ = j
if word[i] == first and i < len(UpperCAmelCase__) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
A__ = tuple(UpperCAmelCase__)
A__ = new_word
if len(UpperCAmelCase__) == 1:
break
else:
A__ = get_pairs(UpperCAmelCase__)
A__ = '''@@ '''.join(UpperCAmelCase__)
A__ = word[:-4]
A__ = word
return word
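    # Hedged recap of the merge loop above: starting from the token's
    # characters plus a trailing "</w>", the lowest-ranked adjacent pair is
    # merged repeatedly; the result is joined with the "@@ " continuation
    # marker, the 4-character "</w>" suffix is stripped, and the outcome is
    # cached per token.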
def SCREAMING_SNAKE_CASE ( self : str , UpperCAmelCase__ : Any) ->Optional[int]:
'''simple docstring'''
A__ = []
A__ = re.findall(R'''\S+\n?''' , UpperCAmelCase__)
for token in words:
split_tokens.extend(list(self.bpe(UpperCAmelCase__).split(''' ''')))
return split_tokens
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : List[str]) ->Any:
'''simple docstring'''
return self.encoder.get(UpperCAmelCase__ , self.encoder.get(self.unk_token))
def SCREAMING_SNAKE_CASE ( self : Optional[int] , UpperCAmelCase__ : str) ->int:
'''simple docstring'''
return self.decoder.get(UpperCAmelCase__ , self.unk_token)
def SCREAMING_SNAKE_CASE ( self : Tuple , UpperCAmelCase__ : Tuple) ->Union[str, Any]:
'''simple docstring'''
A__ = ''' '''.join(UpperCAmelCase__).replace('''@@ ''' , '''''').strip()
return out_string
def SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None) ->Tuple[str]:
'''simple docstring'''
if not os.path.isdir(UpperCAmelCase__):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
return
A__ = os.path.join(
UpperCAmelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
A__ = os.path.join(
UpperCAmelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''])
with open(UpperCAmelCase__ , '''w''' , encoding='''utf-8''') as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCAmelCase__ , ensure_ascii=UpperCAmelCase__) + '''\n''')
A__ = 0
with open(UpperCAmelCase__ , '''w''' , encoding='''utf-8''') as writer:
writer.write('''#version: 0.2\n''')
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv: kv[1]):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
''' Please check that the tokenizer is not corrupted!''')
A__ = token_index
writer.write(''' '''.join(UpperCAmelCase__) + '''\n''')
index += 1
return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
| 14 |
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Any:
"""simple docstring"""
A__ = [0] * len(lowercase_ )
A__ = []
A__ = [1] * len(lowercase_ )
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(lowercase_ ) ):
if indegree[i] == 0:
queue.append(lowercase_ )
while queue:
A__ = queue.pop(0 )
for x in graph[vertex]:
indegree[x] -= 1
if long_dist[vertex] + 1 > long_dist[x]:
A__ = long_dist[vertex] + 1
if indegree[x] == 0:
queue.append(lowercase_ )
print(max(lowercase_ ) )
# Adjacency list of Graph
_lowerCamelCase : Optional[int] = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
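# Hedged walk-through of the Kahn-style pass above: vertices with indegree 0
# seed the queue, each relaxed edge extends long_dist by one, and the printed
# maximum is the vertex count of the longest path in this DAG
# (0 -> 2 -> 5 -> 6 -> 7, i.e. 5).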
| 14 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __magic_name__ ( unittest.TestCase ):
def __init__( self , __snake_case , __snake_case=13 , __snake_case=3 , __snake_case=224 , __snake_case=30 , __snake_case=400 , __snake_case=True , __snake_case=None , __snake_case=True , __snake_case=[0.5, 0.5, 0.5] , __snake_case=[0.5, 0.5, 0.5] , ) -> List[str]:
'''simple docstring'''
__a =size if size is not None else {'''height''': 18, '''width''': 18}
__a =parent
__a =batch_size
__a =num_channels
__a =image_size
__a =min_resolution
__a =max_resolution
__a =do_resize
__a =size
__a =do_normalize
__a =image_mean
__a =image_std
def __magic_name__ ( self ) -> Tuple:
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class __magic_name__ ( __snake_case , unittest.TestCase ):
SCREAMING_SNAKE_CASE = ViTImageProcessor if is_vision_available() else None
def __magic_name__ ( self ) -> int:
'''simple docstring'''
__a =EfficientFormerImageProcessorTester(self )
@property
def __magic_name__ ( self ) -> Dict:
'''simple docstring'''
return self.image_proc_tester.prepare_image_processor_dict()
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
__a =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a_ , 'image_mean' ) )
self.assertTrue(hasattr(a_ , 'image_std' ) )
self.assertTrue(hasattr(a_ , 'do_normalize' ) )
self.assertTrue(hasattr(a_ , 'do_resize' ) )
self.assertTrue(hasattr(a_ , 'size' ) )
def __magic_name__ ( self ) -> Tuple:
'''simple docstring'''
pass
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
__a =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__a =prepare_image_inputs(self.image_proc_tester , equal_resolution=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , Image.Image )
# Test not batched input
__a =image_processor(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
) , )
# Test batched
__a =image_processor(a_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
) , )
def __magic_name__ ( self ) -> int:
'''simple docstring'''
__a =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__a =prepare_image_inputs(self.image_proc_tester , equal_resolution=a_ , numpify=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , np.ndarray )
# Test not batched input
__a =image_processor(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
) , )
# Test batched
__a =image_processor(a_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
) , )
def __magic_name__ ( self ) -> List[str]:
'''simple docstring'''
__a =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__a =prepare_image_inputs(self.image_proc_tester , equal_resolution=a_ , torchify=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , torch.Tensor )
# Test not batched input
__a =image_processor(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
) , )
# Test batched
__a =image_processor(a_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
) , )
| 352 |
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
_lowerCAmelCase : Optional[int] = logging.getLogger(__name__)
_lowerCAmelCase : Any = "pytorch_model.bin"
@dataclasses.dataclass
class __magic_name__ :
SCREAMING_SNAKE_CASE = dataclasses.field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models.'} )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=lowerCAmelCase_ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co.'} , )
@dataclasses.dataclass
class __magic_name__ :
SCREAMING_SNAKE_CASE = dataclasses.field(metadata={'help': 'A csv or a json file containing the training data.'} )
SCREAMING_SNAKE_CASE = dataclasses.field(metadata={'help': 'A csv or a json file containing the data to predict on.'} )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=lowerCAmelCase_ , metadata={'help': 'A csv or a json file containing the validation data.'} )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=lowerCAmelCase_ , metadata={'help': 'The name of the task to train on.'} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=lowerCAmelCase_ , metadata={'help': 'The list of labels for the task.'} )
@dataclasses.dataclass
class __magic_name__ :
SCREAMING_SNAKE_CASE = dataclasses.field(
metadata={'help': 'The output directory where the model predictions and checkpoints will be written.'} )
SCREAMING_SNAKE_CASE = dataclasses.field(
default='accuracy' , metadata={'help': 'The evaluation metric used for the task.'} )
SCREAMING_SNAKE_CASE = dataclasses.field(
default='no' , metadata={
            'help': 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch"]'
} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=1_0 , metadata={'help': 'Number of evaluation calls with no improvement after which training will be stopped.'} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=0.0 , metadata={
'help': 'How much the specified evaluation metric must improve to satisfy early stopping conditions.'
} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=lowerCAmelCase_ , metadata={'help': 'Whether to filter the pseudo-labeled data based on the confidence score.'} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=lowerCAmelCase_ , metadata={'help': 'Whether to filter the pseudo-labeled data based on the validation performance.'} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=lowerCAmelCase_ , metadata={'help': 'Whether to fine-tune on labeled data after pseudo training.'} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=0.0 , metadata={'help': 'Confidence threshold for pseudo-labeled data filtering.'} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=1_0_0 , metadata={'help': 'Number of evaluation calls with no improvement after which training will be stopped.'} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=lowerCAmelCase_ , metadata={'help': 'Random seed for initialization.'} , )
def UpperCamelCase_( _snake_case : int , _snake_case : str , _snake_case : Optional[int] , _snake_case : str , _snake_case : Union[str, Any] , _snake_case : List[Any] ):
"""simple docstring"""
__a =datasets.concatenate_datasets([infer_input, infer_output] , axis=1 )
if args.do_filter_by_confidence:
        __a =dataset.filter(lambda example : example["probability"] > args.confidence_threshold )
if args.do_filter_by_val_performance:
assert eval_result >= 0.0 and eval_result <= 1.0
__a =int(eval_result * len(_snake_case ) )
print(_snake_case )
__a =dataset.sort('probability' , reverse=_snake_case )
__a =dataset.select(range(_snake_case ) )
__a =dataset.remove_columns(['label', 'probability'] )
__a =dataset.rename_column('prediction' , 'label' )
    __a =dataset.map(lambda example : {"label": idalabel[example["label"]]} )
__a =dataset.shuffle(seed=args.seed )
__a =os.path.join(_snake_case , F'train_pseudo.{args.data_file_extension}' )
if args.data_file_extension == "csv":
dataset.to_csv(_snake_case , index=_snake_case )
else:
dataset.to_json(_snake_case )
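# Hedged recap of the pseudo-labeling filter above: predictions are joined
# column-wise with their inputs, optionally filtered by a confidence
# threshold or cut to the top eval_result fraction after sorting by
# probability, and `prediction` is renamed to `label` so the file can serve
# as the next self-training iteration's training set.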
def UpperCamelCase_( _snake_case : List[Any] , _snake_case : str , _snake_case : int , _snake_case : Optional[int] , **_snake_case : List[str] ):
"""simple docstring"""
__a =Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , )
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
__a =STModelArguments(model_name_or_path=_snake_case )
__a =STDataArguments(train_file=_snake_case , infer_file=_snake_case )
__a =STTrainingArguments(output_dir=_snake_case )
__a =argparse.Namespace()
for arg_class in (model_args, data_args, training_args):
for key, value in vars(_snake_case ).items():
setattr(_snake_case , _snake_case , _snake_case )
for key, value in kwargs.items():
if hasattr(_snake_case , _snake_case ):
setattr(_snake_case , _snake_case , _snake_case )
# Sanity checks
__a ={}
__a =None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
__a =args.train_file
__a =args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
__a =args.eval_file
for key in data_files:
__a =data_files[key].split('.' )[-1]
assert extension in ["csv", "json"], F'`{key}_file` should be a csv or a json file.'
if args.data_file_extension is None:
__a =extension
else:
        assert extension == args.data_file_extension, F'`{key}_file` should be a {args.data_file_extension} file.'
assert (
args.eval_metric in datasets.list_metrics()
), F'{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.'
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info('Creating the initial data directory for self-training...' )
__a =F'{args.output_dir}/self-train_iter-{{}}'.format
__a =data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir , exist_ok=_snake_case )
os.makedirs(_snake_case , exist_ok=_snake_case )
accelerator.wait_for_everyone()
__a =None
__a =None
__a =0
__a =False
# Show the progress bar
__a =tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations ) ):
__a =data_dir_format(_snake_case )
assert os.path.exists(_snake_case )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
__a =os.path.join(_snake_case , 'stage-1' )
__a ={
'accelerator': accelerator,
'model_name_or_path': args.model_name_or_path,
'cache_dir': args.cache_dir,
'do_train': True,
'train_file': data_files['train'] if iteration == 0 else data_files['train_pseudo'],
'do_eval': True if args.eval_file is not None else False,
'eval_file': data_files['eval'],
'do_predict': True,
'infer_file': data_files['infer'],
'task_name': args.task_name,
'label_list': args.label_list,
'output_dir': current_output_dir,
'eval_metric': args.eval_metric,
'evaluation_strategy': args.evaluation_strategy,
'early_stopping_patience': args.early_stopping_patience,
'early_stopping_threshold': args.early_stopping_threshold,
'seed': args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(_snake_case , _snake_case ):
arguments_dict.update({key: value} )
__a =os.path.join(_snake_case , 'best-checkpoint' , _snake_case )
if os.path.exists(_snake_case ):
logger.info(
'Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.' , _snake_case , _snake_case , )
else:
logger.info('***** Running self-training: iteration: %d, stage: 1 *****' , _snake_case )
finetune(**_snake_case )
accelerator.wait_for_everyone()
assert os.path.exists(_snake_case )
logger.info('Self-training job completed: iteration: %d, stage: 1.' , _snake_case )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
__a =os.path.join(_snake_case , 'best-checkpoint' )
__a =os.path.join(_snake_case , 'stage-2' )
# Update arguments_dict
__a =model_path
__a =data_files['train']
__a =current_output_dir
__a =os.path.join(_snake_case , 'best-checkpoint' , _snake_case )
if os.path.exists(_snake_case ):
logger.info(
'Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.' , _snake_case , _snake_case , )
else:
logger.info('***** Running self-training: iteration: %d, stage: 2 *****' , _snake_case )
finetune(**_snake_case )
accelerator.wait_for_everyone()
assert os.path.exists(_snake_case )
logger.info('Self-training job completed: iteration: %d, stage: 2.' , _snake_case )
__a =iteration
__a =data_dir_format(iteration + 1 )
__a =AutoConfig.from_pretrained(os.path.join(_snake_case , 'best-checkpoint' ) )
__a =config.idalabel
__a =os.path.join(_snake_case , 'eval_results_best-checkpoint.json' )
__a =os.path.join(_snake_case , 'test_results_best-checkpoint.json' )
assert os.path.exists(_snake_case )
with open(_snake_case , 'r' ) as f:
__a =float(json.load(_snake_case )[args.eval_metric] )
__a =os.path.join(_snake_case , 'infer_output_best-checkpoint.csv' )
assert os.path.exists(_snake_case )
# Loading the dataset from local csv or json files.
__a =load_dataset(args.data_file_extension , data_files={'data': data_files['infer']} )['data']
__a =load_dataset('csv' , data_files={'data': infer_output_file} )['data']
if accelerator.is_main_process:
os.makedirs(_snake_case , exist_ok=_snake_case )
shutil.copy(_snake_case , os.path.join(_snake_case , F'eval_results_iter-{iteration}.json' ) )
if os.path.exists(_snake_case ):
shutil.copy(_snake_case , os.path.join(_snake_case , F'test_results_iter-{iteration}.json' ) )
create_pseudo_labeled_data(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case )
accelerator.wait_for_everyone()
__a =os.path.join(_snake_case , F'train_pseudo.{args.data_file_extension}' )
if args.evaluation_strategy != IntervalStrategy.NO.value:
__a =eval_result
if best_iteration is None:
__a =new_iteration
__a =new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
__a =new_iteration
__a =new_eval_result
__a =0
else:
if new_eval_result == best_eval_result:
__a =new_iteration
__a =new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
__a =True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info('Best iteration: %d' , _snake_case )
logger.info('Best evaluation result: %s = %f' , args.eval_metric , _snake_case )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(_snake_case , F'eval_results_iter-{iteration}.json' ) , os.path.join(_snake_case , 'eval_results_best-iteration.json' ) , )
else:
# Assume that the last iteration is the best
logger.info('Best iteration: %d' , args.max_selftrain_iterations - 1 )
logger.info('Best evaluation result: %s = %f' , args.eval_metric , _snake_case )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(_snake_case , F'eval_results_iter-{args.max_selftrain_iterations - 1}.json' ) , os.path.join(_snake_case , 'eval_results_best-iteration.json' ) , )
| 308 | 0 |
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class __A :
'''simple docstring'''
def __init__( self , __lowerCAmelCase , __lowerCAmelCase=1_4 , __lowerCAmelCase=7 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=9_9 , __lowerCAmelCase=3_2 , __lowerCAmelCase=5 , __lowerCAmelCase=4 , __lowerCAmelCase=3_7 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=5_1_2 , __lowerCAmelCase=1_6 , __lowerCAmelCase=2 , __lowerCAmelCase=0.02 , __lowerCAmelCase=3 , __lowerCAmelCase=4 , __lowerCAmelCase=None , ):
'''simple docstring'''
lowerCamelCase__ = parent
lowerCamelCase__ = batch_size
lowerCamelCase__ = seq_length
lowerCamelCase__ = is_training
lowerCamelCase__ = use_token_type_ids
lowerCamelCase__ = use_input_mask
lowerCamelCase__ = use_labels
lowerCamelCase__ = use_mc_token_ids
lowerCamelCase__ = vocab_size
lowerCamelCase__ = hidden_size
lowerCamelCase__ = num_hidden_layers
lowerCamelCase__ = num_attention_heads
lowerCamelCase__ = intermediate_size
lowerCamelCase__ = hidden_act
lowerCamelCase__ = hidden_dropout_prob
lowerCamelCase__ = attention_probs_dropout_prob
lowerCamelCase__ = max_position_embeddings
lowerCamelCase__ = type_vocab_size
lowerCamelCase__ = type_sequence_label_size
lowerCamelCase__ = initializer_range
lowerCamelCase__ = num_labels
lowerCamelCase__ = num_choices
lowerCamelCase__ = scope
lowerCamelCase__ = self.vocab_size - 1
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase__ = None
if self.use_input_mask:
lowerCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase__ = None
if self.use_token_type_ids:
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase__ = None
if self.use_mc_token_ids:
lowerCamelCase__ = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
lowerCamelCase__ = None
lowerCamelCase__ = None
lowerCamelCase__ = None
if self.use_labels:
lowerCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase__ = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase__ = self.get_config()
lowerCamelCase__ = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def __lowerCamelCase ( self ):
'''simple docstring'''
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , *__lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = CTRLModel(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ , head_mask=UpperCamelCase_ )
model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ )
lowerCamelCase__ = model(UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , *__lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = CTRLLMHeadModel(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowerCamelCase__ = model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
lowerCamelCase__ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''head_mask''': head_mask}
return config, inputs_dict
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , *__lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = self.num_labels
lowerCamelCase__ = CTRLForSequenceClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowerCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase__ = model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class __A ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
lowerCAmelCase_ = (CTRLLMHeadModel,) if is_torch_available() else ()
lowerCAmelCase_ = (
{
'''feature-extraction''': CTRLModel,
'''text-classification''': CTRLForSequenceClassification,
'''text-generation''': CTRLLMHeadModel,
'''zero-shot''': CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase_ = True
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
'''simple docstring'''
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = CTRLModelTester(self )
lowerCamelCase__ = ConfigTester(self , config_class=UpperCamelCase_ , n_embd=3_7 )
def __lowerCamelCase ( self ):
'''simple docstring'''
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*UpperCamelCase_ )
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*UpperCamelCase_ )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ = CTRLModel.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
@unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :)
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
@require_torch
class __A ( unittest.TestCase ):
'''simple docstring'''
def __lowerCamelCase ( self ):
'''simple docstring'''
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = CTRLLMHeadModel.from_pretrained('''ctrl''' )
model.to(UpperCamelCase_ )
lowerCamelCase__ = torch.tensor(
[[1_1_8_5_9, 0, 1_6_1_1, 8]] , dtype=torch.long , device=UpperCamelCase_ ) # Legal the president is
lowerCamelCase__ = [
1_1_8_5_9,
0,
1_6_1_1,
8,
5,
1_5_0,
2_6_4_4_9,
2,
1_9,
3_4_8,
4_6_9,
3,
2_5_9_5,
4_8,
2_0_7_4_0,
2_4_6_5_3_3,
2_4_6_5_3_3,
1_9,
3_0,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
lowerCamelCase__ = model.generate(UpperCamelCase_ , do_sample=UpperCamelCase_ )
self.assertListEqual(output_ids[0].tolist() , UpperCamelCase_ )
| 209 |
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
lowerCAmelCase = logging.getLogger(__name__)
lowerCAmelCase = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
lowerCAmelCase = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class _a :
_lowercase : Optional[str] = field(
default=UpperCamelCase__ , metadata={
'''help''': (
'''The model checkpoint for weights initialization. Leave None if you want to train a model from'''
''' scratch.'''
)
} , )
_lowercase : Optional[str] = field(
default=UpperCamelCase__ , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(UpperCamelCase__ )} , )
_lowercase : Optional[str] = field(
default=UpperCamelCase__ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
_lowercase : Optional[str] = field(
default=UpperCamelCase__ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
_lowercase : Optional[str] = field(
default=UpperCamelCase__ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
@dataclass
class _a :
_lowercase : Optional[str] = field(
default=UpperCamelCase__ , metadata={'''help''': '''The input training data file (a text file).'''} )
_lowercase : Optional[str] = field(
default=UpperCamelCase__ , metadata={
'''help''': (
'''The input training data files (multiple files in glob format). '''
'''Very often splitting large files to smaller files can prevent tokenizer going out of memory'''
)
} , )
_lowercase : Optional[str] = field(
default=UpperCamelCase__ , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , )
_lowercase : Optional[str] = field(
default=UpperCamelCase__ , metadata={'''help''': '''An optional input train ref data file for whole word mask in Chinese.'''} , )
_lowercase : Optional[str] = field(
default=UpperCamelCase__ , metadata={'''help''': '''An optional input eval ref data file for whole word mask in Chinese.'''} , )
_lowercase : bool = field(
default=UpperCamelCase__ , metadata={'''help''': '''Whether distinct lines of text in the dataset are to be handled as distinct sequences.'''} , )
_lowercase : bool = field(
default=UpperCamelCase__ , metadata={'''help''': '''Train with masked-language modeling loss instead of language modeling.'''} )
_lowercase : bool = field(default=UpperCamelCase__ , metadata={'''help''': '''Whether ot not to use whole word mask.'''} )
_lowercase : float = field(
default=0.15 , metadata={'''help''': '''Ratio of tokens to mask for masked language modeling loss'''} )
_lowercase : float = field(
default=1 / 6 , metadata={
'''help''': (
'''Ratio of length of a span of masked tokens to surrounding context length for permutation language'''
''' modeling.'''
)
} , )
_lowercase : int = field(
default=5 , metadata={'''help''': '''Maximum length of a span of masked tokens for permutation language modeling.'''} )
_lowercase : int = field(
default=-1 , metadata={
'''help''': (
'''Optional input sequence length after tokenization.'''
'''The training dataset will be truncated in block of this size for training.'''
'''Default to the model max input length for single sentence inputs (take into account special tokens).'''
)
} , )
_lowercase : bool = field(
default=UpperCamelCase__ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = None , ):
"""simple docstring"""
def _dataset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None ):
if args.line_by_line:
if ref_path is not None:
if not args.whole_word_mask or not args.mlm:
                    raise ValueError('''You need to set whole word masking and mlm to True for Chinese Whole Word Mask''' )
return LineByLineWithRefDataset(
tokenizer=SCREAMING_SNAKE_CASE , file_path=SCREAMING_SNAKE_CASE , block_size=args.block_size , ref_path=SCREAMING_SNAKE_CASE , )
return LineByLineTextDataset(tokenizer=SCREAMING_SNAKE_CASE , file_path=SCREAMING_SNAKE_CASE , block_size=args.block_size )
else:
return TextDataset(
tokenizer=SCREAMING_SNAKE_CASE , file_path=SCREAMING_SNAKE_CASE , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=SCREAMING_SNAKE_CASE , )
if evaluate:
return _dataset(args.eval_data_file , args.eval_ref_file )
elif args.train_data_files:
return ConcatDataset([_dataset(SCREAMING_SNAKE_CASE ) for f in glob(args.train_data_files )] )
else:
return _dataset(args.train_data_file , args.train_ref_file )
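# Hedged usage sketch (mirroring how main() below calls the loader): with
# line_by_line set, every input line becomes one example; otherwise the raw
# text is chunked into block_size windows via TextDataset.
#   train_dataset = get_dataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir)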
def _a ( ):
"""simple docstring"""
lowercase__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
lowercase__ , lowercase__ , lowercase__ = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
'''Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file '''
'''or remove the --do_eval argument.''' )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , SCREAMING_SNAKE_CASE )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
lowercase__ = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
lowercase__ = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
lowercase__ = CONFIG_MAPPING[model_args.model_type]()
logger.warning('''You are instantiating a new config instance from scratch.''' )
if model_args.tokenizer_name:
lowercase__ = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
lowercase__ = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
raise ValueError(
'''You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another'''
            ''' script, save it, and load it from here, using --tokenizer_name''' )
if model_args.model_name_or_path:
lowercase__ = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , )
else:
logger.info('''Training new model from scratch''' )
lowercase__ = AutoModelWithLMHead.from_config(SCREAMING_SNAKE_CASE )
model.resize_token_embeddings(len(SCREAMING_SNAKE_CASE ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
'''BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the'''
            ''' --mlm flag (masked language modeling).''' )
if data_args.block_size <= 0:
lowercase__ = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
lowercase__ = min(data_args.block_size , tokenizer.max_len )
# Get datasets
lowercase__ = (
get_dataset(SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir ) if training_args.do_train else None
)
lowercase__ = (
get_dataset(SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE , evaluate=SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir )
if training_args.do_eval
else None
)
if config.model_type == "xlnet":
lowercase__ = DataCollatorForPermutationLanguageModeling(
tokenizer=SCREAMING_SNAKE_CASE , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
else:
if data_args.mlm and data_args.whole_word_mask:
lowercase__ = DataCollatorForWholeWordMask(
tokenizer=SCREAMING_SNAKE_CASE , mlm_probability=data_args.mlm_probability )
else:
lowercase__ = DataCollatorForLanguageModeling(
tokenizer=SCREAMING_SNAKE_CASE , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
lowercase__ = Trainer(
model=SCREAMING_SNAKE_CASE , args=SCREAMING_SNAKE_CASE , data_collator=SCREAMING_SNAKE_CASE , train_dataset=SCREAMING_SNAKE_CASE , eval_dataset=SCREAMING_SNAKE_CASE , prediction_loss_only=SCREAMING_SNAKE_CASE , )
# Training
if training_args.do_train:
lowercase__ = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
else None
)
trainer.train(model_path=SCREAMING_SNAKE_CASE )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
lowercase__ = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
lowercase__ = trainer.evaluate()
lowercase__ = math.exp(eval_output['''eval_loss'''] )
lowercase__ = {'''perplexity''': perplexity}
lowercase__ = os.path.join(training_args.output_dir , '''eval_results_lm.txt''' )
if trainer.is_world_master():
with open(SCREAMING_SNAKE_CASE , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key in sorted(result.keys() ):
logger.info(''' %s = %s''' , SCREAMING_SNAKE_CASE , str(result[key] ) )
writer.write('''%s = %s\n''' % (key, str(result[key] )) )
results.update(SCREAMING_SNAKE_CASE )
return results
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 110 | 0 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def lowercase_ ( lowerCAmelCase__ : Optional[Any] ):
"""simple docstring"""
return (torch.arange(state.num_processes ) + 1.0 + (state.num_processes * state.process_index)).to(state.device )
def lowercase_ ( lowerCAmelCase__ : List[str] ):
"""simple docstring"""
__UpperCAmelCase : str = create_tensor(lowerCAmelCase__ )
__UpperCAmelCase : Union[str, Any] = gather(lowerCAmelCase__ )
assert gathered_tensor.tolist() == list(range(1 , state.num_processes**2 + 1 ) )
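# Hedged illustration of the gather semantics asserted above: with N
# processes, rank r contributes [r*N + 1, ..., r*N + N], so the gathered
# tensor on every rank is the concatenation 1..N**2. For N = 2, rank 0 holds
# tensor([1., 2.]), rank 1 holds tensor([3., 4.]), and gather returns
# tensor([1., 2., 3., 4.]) everywhere.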
def lowercase_ ( lowerCAmelCase__ : str ):
"""simple docstring"""
__UpperCAmelCase : Dict = [state.process_index]
__UpperCAmelCase : int = gather_object(lowerCAmelCase__ )
assert len(lowerCAmelCase__ ) == state.num_processes, f'{gathered_obj}, {len(lowerCAmelCase__ )} != {state.num_processes}'
assert gathered_obj == list(range(state.num_processes ) ), f'{gathered_obj} != {list(range(state.num_processes ) )}'
def lowercase_ ( lowerCAmelCase__ : str ):
"""simple docstring"""
__UpperCAmelCase : Any = create_tensor(lowerCAmelCase__ )
__UpperCAmelCase : Optional[int] = broadcast(lowerCAmelCase__ )
assert broadcasted_tensor.shape == torch.Size([state.num_processes] )
assert broadcasted_tensor.tolist() == list(range(1 , state.num_processes + 1 ) )
def lowercase_ ( lowerCAmelCase__ : Optional[Any] ):
"""simple docstring"""
if state.is_main_process:
__UpperCAmelCase : Optional[Any] = torch.arange(state.num_processes + 1 ).to(state.device )
else:
__UpperCAmelCase : List[str] = torch.arange(state.num_processes ).to(state.device )
__UpperCAmelCase : int = pad_across_processes(lowerCAmelCase__ )
assert padded_tensor.shape == torch.Size([state.num_processes + 1] )
if not state.is_main_process:
assert padded_tensor.tolist() == list(range(0 , state.num_processes ) ) + [0]
def lowercase_ ( lowerCAmelCase__ : int ):
"""simple docstring"""
if state.num_processes != 2:
return
__UpperCAmelCase : Any = create_tensor(lowerCAmelCase__ )
__UpperCAmelCase : str = reduce(lowerCAmelCase__ , """sum""" )
__UpperCAmelCase : Tuple = torch.tensor([4.0, 6] ).to(state.device )
assert torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ ), f'{reduced_tensor} != {truth_tensor}'
def lowercase_ ( lowerCAmelCase__ : Tuple ):
"""simple docstring"""
if state.num_processes != 2:
return
__UpperCAmelCase : int = create_tensor(lowerCAmelCase__ )
__UpperCAmelCase : Union[str, Any] = reduce(lowerCAmelCase__ , """mean""" )
__UpperCAmelCase : int = torch.tensor([2.0, 3] ).to(state.device )
assert torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ ), f'{reduced_tensor} != {truth_tensor}'
def lowercase_ ( lowerCAmelCase__ : List[Any] ):
"""simple docstring"""
main()
def lowercase_ ( ):
"""simple docstring"""
__UpperCAmelCase : List[Any] = PartialState()
state.print(f'State: {state}' )
state.print("""testing gather""" )
test_gather(lowerCAmelCase__ )
state.print("""testing gather_object""" )
test_gather_object(lowerCAmelCase__ )
state.print("""testing broadcast""" )
test_broadcast(lowerCAmelCase__ )
state.print("""testing pad_across_processes""" )
test_pad_across_processes(lowerCAmelCase__ )
state.print("""testing reduce_sum""" )
test_reduce_sum(lowerCAmelCase__ )
state.print("""testing reduce_mean""" )
test_reduce_mean(lowerCAmelCase__ )
if __name__ == "__main__":
main()
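
# Hedged usage sketch (not part of the original script): the same collective-op checks
# can be launched from plain Python instead of the `accelerate launch` CLI, e.g. on two
# CPU processes:
#
#   from accelerate import notebook_launcher
#   notebook_launcher(main, args=(), num_processes=2)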
| 16 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
# NOTE: the tokenizer class names in this copy of the dummy-objects module were
# anonymized to `_A`; each placeholder below stood in for a different
# sentencepiece-backed tokenizer in the original file. The previously undefined
# `__SCREAMING_SNAKE_CASE` metaclass is restored to `DummyObject` and the class
# attribute to `_backends`, so touching any placeholder fails fast with an informative
# ImportError via `requires_backends` when sentencepiece is not installed.
class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])
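
# Hedged sketch (illustrative, not part of the original file): what the
# DummyObject/requires_backends pattern does at runtime. Instantiating a placeholder
# raises an ImportError telling the user to install the missing backend, so importing
# the parent package never fails just because sentencepiece is absent.
#
#   class MySentencePieceTokenizer(metaclass=DummyObject):  # hypothetical name
#       _backends = ["sentencepiece"]
#
#       def __init__(self, *args, **kwargs):
#           requires_backends(self, ["sentencepiece"])
#
#   MySentencePieceTokenizer()  # ImportError if sentencepiece is not installed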
| 16 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class TFLayoutLMModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        # convert bbox to numpy since TF does not support item assignment
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox).numpy()
        # Ensure that bbox is legal (x0 <= x1 and y0 <= y1 for every box)
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        bbox = tf.convert_to_tensor(bbox)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = LayoutLMConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, )
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMModel(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMForMaskedLM(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMForSequenceClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMForTokenClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMForQuestionAnswering(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFLayoutLMModel,
TFLayoutLMForMaskedLM,
TFLayoutLMForTokenClassification,
TFLayoutLMForSequenceClassification,
TFLayoutLMForQuestionAnswering,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': TFLayoutLMModel,
'fill-mask': TFLayoutLMForMaskedLM,
'text-classification': TFLayoutLMForSequenceClassification,
'token-classification': TFLayoutLMForTokenClassification,
'zero-shot': TFLayoutLMForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = True
    onnx_min_opset = 10
    def setUp(self):
        self.model_tester = TFLayoutLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Onnx compliancy broke with TF 2.10")
    def test_onnx_compliancy(self):
        pass
def prepare_layoutlm_batch_inputs():
    # Prepare a batch of 2 sequences for LayoutLM forward-pass tests.
    # fmt: off
    input_ids = tf.convert_to_tensor([[1_01,10_19,10_14,10_16,10_37,1_28_49,47_47,10_04,1_42_46,22_78,54_39,45_24,50_02,29_30,21_93,29_30,43_41,32_08,10_05,10_55,21_71,28_48,1_13_00,35_31,1_02],[1_01,40_70,40_34,70_20,10_24,30_58,10_15,10_13,28_61,10_13,60_70,1_92_74,27_72,62_05,2_78_14,1_61_47,1_61_47,43_43,20_47,1_02_83,1_09_69,1_43_89,10_12,23_38,1_02]])  # noqa: E231
    attention_mask = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],])  # noqa: E231
    bbox = tf.convert_to_tensor([[[0,0,0,0],[4_23,2_37,4_40,2_51],[4_27,2_72,4_41,2_87],[4_19,1_15,4_37,1_29],[9_61,8_85,9_92,9_12],[2_56,38,3_30,58],[2_56,38,3_30,58],[3_36,42,3_53,57],[3_60,39,4_01,56],[3_60,39,4_01,56],[4_11,39,4_71,59],[4_79,41,5_28,59],[5_33,39,6_30,60],[67,1_13,1_34,1_31],[1_41,1_15,2_09,1_32],[68,1_49,1_33,1_66],[1_41,1_49,1_87,1_64],[1_95,1_48,2_87,1_65],[1_95,1_48,2_87,1_65],[1_95,1_48,2_87,1_65],[2_95,1_48,3_49,1_65],[4_41,1_49,4_92,1_66],[4_97,1_49,5_46,1_64],[64,2_01,1_25,2_18],[10_00,10_00,10_00,10_00]],[[0,0,0,0],[6_62,1_50,7_54,1_66],[6_65,1_99,7_42,2_11],[5_19,2_13,5_54,2_28],[5_19,2_13,5_54,2_28],[1_34,4_33,1_87,4_54],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[3_14,4_69,3_76,4_82],[5_04,6_84,5_82,7_06],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[6_10,7_49,6_52,7_65],[1_30,6_59,1_68,6_72],[1_76,6_57,2_37,6_72],[2_38,6_57,3_12,6_72],[4_43,6_53,6_28,6_72],[4_43,6_53,6_28,6_72],[7_16,3_01,8_25,3_17],[10_00,10_00,10_00,10_00]]])  # noqa: E231
    token_type_ids = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]])  # noqa: E231
    # these are sequence labels (i.e. at the token level)
    labels = tf.convert_to_tensor([[-1_00,10,10,10,9,1,-1_00,7,7,-1_00,7,7,4,2,5,2,8,8,-1_00,-1_00,5,0,3,2,-1_00],[-1_00,12,12,12,-1_00,12,10,-1_00,-1_00,-1_00,-1_00,10,12,9,-1_00,-1_00,-1_00,10,10,10,9,12,-1_00,10,-1_00]])  # noqa: E231
    # fmt: on
    return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class TFLayoutLMModelIntegrationTest(unittest.TestCase):
    @slow
    def test_forward_pass_no_head(self):
        model = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased")
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)

        # test the sequence output on [0, :3, :3]
        expected_slice = tf.convert_to_tensor(
            [[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]],
        )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-3))

        # test the pooled output on [1, :3]
        expected_slice = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552])
        self.assertTrue(np.allclose(outputs.pooler_output[1, :3], expected_slice, atol=1e-3))
    @slow
    def test_forward_pass_sequence_classification(self):
        # initialize model with randomly initialized sequence classification head
        model = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=2)
        input_ids, attention_mask, bbox, token_type_ids, _ = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(
            input_ids=input_ids,
            bbox=bbox,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            labels=tf.convert_to_tensor([1, 1]),
        )

        # test whether we get a loss as a scalar
        loss = outputs.loss
        expected_shape = (2,)
        self.assertEqual(loss.shape, expected_shape)

        # test the shape of the logits
        logits = outputs.logits
        expected_shape = (2, 2)
        self.assertEqual(logits.shape, expected_shape)
    @slow
    def test_forward_pass_token_classification(self):
        # initialize model with randomly initialized token classification head
        model = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=13)
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(
            input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=labels
        )

        # test the shape of the logits
        logits = outputs.logits
        expected_shape = tf.convert_to_tensor((2, 25, 13))
        self.assertEqual(logits.shape, expected_shape)
    @slow
    def test_forward_pass_question_answering(self):
        # initialize model with randomly initialized question answering head
        model = TFLayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased")
        input_ids, attention_mask, bbox, token_type_ids, _ = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)

        # test the shape of the logits
        expected_shape = tf.convert_to_tensor((2, 25))
        self.assertEqual(outputs.start_logits.shape, expected_shape)
        self.assertEqual(outputs.end_logits.shape, expected_shape)
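
# Hedged usage sketch (the word boxes below are made-up coordinates): LayoutLM pairs
# each token id with a (x0, y0, x1, y1) box on a 0-1000 normalized page grid, aligned
# with input_ids, including the conventional [CLS]/[SEP] boxes.
#
#   from transformers import LayoutLMTokenizer, TFLayoutLMModel
#
#   tokenizer = LayoutLMTokenizer.from_pretrained("microsoft/layoutlm-base-uncased")
#   model = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased")
#   encoding = tokenizer("Hello world", return_tensors="tf")  # [CLS] hello world [SEP]
#   bbox = tf.convert_to_tensor([[[0, 0, 0, 0], [83, 41, 175, 97], [190, 41, 281, 97], [1000, 1000, 1000, 1000]]])
#   outputs = model(input_ids=encoding["input_ids"], bbox=bbox, attention_mask=encoding["attention_mask"])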
| 255 |
"""simple docstring"""
import math
def solution(n: int = 1_00) -> int:
    """Return the difference between the square of the sum and the sum of the squares of the first n naturals."""
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(f'''{solution() = }''')
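
# Worked check (not in the original file): for n = 10 the sum of squares is 385 and the
# square of the sum is 55**2 = 3025, so solution(10) == 3025 - 385 == 2640.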
| 255 | 1 |
"""simple docstring"""
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['memory_attention', 'encoder_attn'],
['attention', 'attn'],
['/', '.'],
['.LayerNorm.gamma', '_layer_norm.weight'],
['.LayerNorm.beta', '_layer_norm.bias'],
['r.layer_', 'r.layers.'],
['output_proj', 'out_proj'],
['ffn.dense_1.', 'fc2.'],
['ffn.dense.', 'fc1.'],
['ffn_layer_norm', 'final_layer_norm'],
['kernel', 'weight'],
['encoder_layer_norm.', 'encoder.layer_norm.'],
['decoder_layer_norm.', 'decoder.layer_norm.'],
['embeddings.weights', 'shared.weight'],
]
def rename_state_dict_key(k) -> str:
    """Map a TF Pegasus state-dict key to the corresponding HF key using PATTERNS."""
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k
def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if "dense" in k or "proj" in new_k:
            v = v.T  # TF stores dense kernels transposed relative to torch
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000") -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["Adafactor", "global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str):
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)

    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
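
# Hedged usage sketch (the script file name is assumed; the checkpoint path matches the
# function's documented default placeholder):
#   python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000 ./pegasus-aeslc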
| 370 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wavlm"] = [
'WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'WavLMForAudioFrameClassification',
'WavLMForCTC',
'WavLMForSequenceClassification',
'WavLMForXVector',
'WavLMModel',
'WavLMPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
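
# Hedged sketch (illustrative, not part of the module): the _LazyModule indirection
# keeps importing the package cheap; the torch-backed classes listed above are only
# imported on first attribute access.
#
#   from transformers.models import wavlm
#   model_cls = wavlm.WavLMModel  # modeling_wavlm is actually imported here, not earlier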
| 53 | 0 |
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        """Fallback stand-in so this module still imports when vision deps are missing."""

        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_torch
class ZeroShotObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )
        examples = [
{
"""image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
}
]
return object_detector, examples
    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector(examples[0], threshold=0.0)

        n = len(outputs)
        self.assertGreater(n, 0)
        self.assertEqual(
            outputs,
            [
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                }
                for i in range(n)
            ],
        )
@require_tf
@unittest.skip("""Zero Shot Object Detection not implemented in TF""" )
    def test_small_model_tf(self):
        pass
@require_torch
    def test_small_model_pt(self):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )

        outputs = object_detector(
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            candidate_labels=["cat", "remote", "couch"],
            threshold=0.64,
        )
self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{"""score""": 0.7235, """label""": """cat""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.7218, """label""": """remote""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.7184, """label""": """couch""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.6748, """label""": """remote""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6656, """label""": """cat""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6614, """label""": """couch""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6456, """label""": """remote""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
{"""score""": 0.642, """label""": """remote""", """box""": {"""xmin""": 67, """ymin""": 274, """xmax""": 93, """ymax""": 297}},
{"""score""": 0.6419, """label""": """cat""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
] , )
        outputs = object_detector(
[
{
"""image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
}
] , threshold=0.64 , )
self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
[
{"""score""": 0.7235, """label""": """cat""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.7218, """label""": """remote""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.7184, """label""": """couch""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.6748, """label""": """remote""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6656, """label""": """cat""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6614, """label""": """couch""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6456, """label""": """remote""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
{"""score""": 0.642, """label""": """remote""", """box""": {"""xmin""": 67, """ymin""": 274, """xmax""": 93, """ymax""": 297}},
{"""score""": 0.6419, """label""": """cat""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
]
] , )
@require_torch
@slow
    def test_large_model_pt(self):
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
        )
self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
{"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
{"""score""": 0.1474, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}},
{"""score""": 0.1208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}},
] , )
        outputs = object_detector(
[
{
"""image""": """http://images.cocodataset.org/val2017/000000039769.jpg""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
},
{
"""image""": """http://images.cocodataset.org/val2017/000000039769.jpg""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
},
] , )
self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
[
{"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
{"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
{"""score""": 0.1474, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}},
{"""score""": 0.1208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}},
],
[
{"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
{"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
{"""score""": 0.1474, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}},
{"""score""": 0.1208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}},
],
] , )
@require_tf
@unittest.skip("""Zero Shot Object Detection not implemented in TF""" )
    def test_large_model_tf(self):
        pass
@require_torch
@slow
    def test_threshold(self):
        threshold = 0.2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            threshold=threshold,
        )
self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
{"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
] , )
@require_torch
@slow
    def test_top_k(self):
        top_k = 2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            top_k=top_k,
        )
self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
] , )
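
# Hedged usage sketch (not part of the test file): the pipeline scores free-form text
# labels against image regions, so new object classes need no retraining.
#
#   from transformers import pipeline
#
#   detector = pipeline("zero-shot-object-detection")  # defaults to an OWL-ViT checkpoint
#   preds = detector(
#       "http://images.cocodataset.org/val2017/000000039769.jpg",
#       candidate_labels=["cat", "remote", "couch"],
#   )
#   for p in preds:
#       print(p["label"], round(p["score"], 3), p["box"])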
| 38 |
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
logger = logging.get_logger(__name__)

DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)
def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    """Interleave several datasets into one, sampling examples from one source at a time."""
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets.")
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    """Concatenate several datasets into one, along rows (axis=0) or columns (axis=1)."""
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets.")
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
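
# Hedged usage sketch (not part of the module): mixing two in-memory datasets with
# sampling probabilities, then stacking them row-wise.
#
#   from datasets import Dataset, concatenate_datasets, interleave_datasets
#
#   d1 = Dataset.from_dict({"text": ["a", "b", "c"]})
#   d2 = Dataset.from_dict({"text": ["x", "y", "z"]})
#   mixed = interleave_datasets([d1, d2], probabilities=[0.7, 0.3], seed=42)
#   stacked = concatenate_datasets([d1, d2])  # 6 rows; axis=0 is the default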
| 176 | 0 |
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_(state_dict):
    """Drop fairseq-only keys, in place, before loading into the HF model."""
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    """Build a bias-free linear layer that shares weights with an embedding (used for the LM head)."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False
):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = "relu"

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)

    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)

    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
    )
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--hf_config",
        default="facebook/mbart-large-cc25",
        type=str,
        help="Which huggingface architecture to use: mbart-large",
    )
    parser.add_argument("--mbart_50", action="store_true", help="whether the model is an mBART-50 checkpoint")
    parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
    model.save_pretrained(args.pytorch_dump_folder_path)
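
# Hedged usage sketch (the script file name and paths are placeholders):
#   python convert_mbart_checkpoint.py /path/to/fairseq/model.pt ./mbart-converted \
#       --hf_config facebook/mbart-large-cc25 --finetuned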
| 352 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class AltCLIPProcessor(ProcessorMixin):
    """Bundles a CLIP image processor and an XLM-R tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
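
# Hedged usage sketch (the checkpoint name is an assumption, not taken from this file):
#
#   from PIL import Image
#   from transformers import AltCLIPProcessor
#
#   processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")
#   inputs = processor(text=["a photo of a cat"], images=Image.open("cat.png"), return_tensors="pt")
#   # `inputs` now carries input_ids/attention_mask from the tokenizer plus pixel_values.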
| 341 | 0 |
"""simple docstring"""
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UNetBlockTesterMixin:
    @property
    def dummy_input(self):
        return self.get_dummy_input()

    @property
    def output_shape(self):
        if self.block_type == "down":
            return (4, 32, 16, 16)
        elif self.block_type == "mid":
            return (4, 32, 32, 32)
        elif self.block_type == "up":
            return (4, 32, 64, 64)

        raise ValueError(f"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.")
    def get_dummy_input(
        self,
        include_temb=True,
        include_res_hidden_states_tuple=False,
        include_encoder_hidden_states=False,
        include_skip_sample=False,
    ):
        batch_size = 4
        num_channels = 32
        sizes = (32, 32)

        generator = torch.manual_seed(0)
        device = torch.device(torch_device)
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape, generator=generator, device=device)
        dummy_input = {"hidden_states": hidden_states}

        if include_temb:
            temb_channels = 128
            dummy_input["temb"] = randn_tensor((batch_size, temb_channels), generator=generator, device=device)

        if include_res_hidden_states_tuple:
            generator_1 = torch.manual_seed(1)
            dummy_input["res_hidden_states_tuple"] = (randn_tensor(shape, generator=generator_1, device=device),)

        if include_encoder_hidden_states:
            dummy_input["encoder_hidden_states"] = floats_tensor((batch_size, 32, 32)).to(device)

        if include_skip_sample:
            dummy_input["skip_sample"] = randn_tensor((batch_size, 3) + sizes, generator=generator, device=device)

        return dummy_input
    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
            "temb_channels": 128,
        }
        if self.block_type == "up":
            init_dict["prev_output_channel"] = 32
        if self.block_type == "mid":
            init_dict.pop("out_channels")

        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_output(self, expected_slice):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict)
        unet_block.to(torch_device)
        unet_block.eval()

        with torch.no_grad():
            output = unet_block(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        self.assertEqual(output.shape, self.output_shape)

        output_slice = output[0, -1, -3:, -3:]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        assert torch_all_close(output_slice.flatten(), expected_slice, atol=5e-3)
@unittest.skipIf(torch_device == 'mps' , 'Training is not supported in mps' )
    def test_training(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.block_class(**init_dict)
        model.to(torch_device)
        model.train()
        output = model(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        device = torch.device(torch_device)
        noise = randn_tensor(output.shape, device=device)
        loss = torch.nn.functional.mse_loss(output, noise)
        loss.backward()
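
# Hedged sketch (class name and block choice are illustrative): concrete test classes
# combine this mixin with unittest.TestCase and pin `block_class`/`block_type`.
#
#   from diffusers.models.unet_2d_blocks import DownBlock2D
#
#   class DownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
#       block_class = DownBlock2D
#       block_type = "down"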
| 183 |
"""simple docstring"""
UNIT_SYMBOL = {
'meter': 'm',
'kilometer': 'km',
'megametre': 'Mm',
'gigametre': 'Gm',
'terametre': 'Tm',
'petametre': 'Pm',
'exametre': 'Em',
'zettametre': 'Zm',
'yottametre': 'Ym',
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
'm': 0,
'km': 3,
'Mm': 6,
'Gm': 9,
'Tm': 1_2,
'Pm': 1_5,
'Em': 1_8,
'Zm': 2_1,
'Ym': 2_4,
}
def length_conversion(value: float, from_type: str, to_type: str) -> float:
    """Convert a length between metric units by scaling with the difference of their base-10 exponents."""
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")

    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)

    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)

    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1
    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)

    return value * pow(10, exponent)
if __name__ == "__main__":
from doctest import testmod
testmod()
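
# Hedged usage sketch (not in the original file): conversion multiplies by 10 raised to
# the difference of the exponents tabulated above.
#   length_conversion(4, "kilometer", "meter")      # -> 4000.0  (10**(3 - 0))
#   length_conversion(4, "kilometer", "megametre")  # -> 0.004   (10**(3 - 6))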
| 220 | 0 |
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
PRED = [
'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the'
' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe'
' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.',
'The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal'
' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s'
' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the'
' body.',
'Amnesty International releases its annual report on the death penalty. The report catalogs the use of'
' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the'
' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital'
' punishment.',
]
TGT = [
'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'
' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz'
' had informed his Lufthansa training school of an episode of severe depression, airline says .',
'Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .'
' Israel and the United States opposed the move, which could open the door to war crimes investigations against'
' Israelis .',
'Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to'
' death . Organization claims that governments around the world are using the threat of terrorism to advance'
' executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death'
' sentences up by 28% .',
]
def test_disaggregated_scores_are_determinstic():
    no_aggregation = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2", "rougeL"])
    assert isinstance(no_aggregation, defaultdict)
    no_aggregation_just_r2 = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2"])
    assert (
        pd.DataFrame(no_aggregation["rouge2"]).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_r2["rouge2"]).fmeasure.mean()
    )
def test_newline_cnn_improvement():
    k = "rougeLsum"
    score = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=[k])[k]
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=[k])[k]
    assert score > score_no_sep
def test_newline_irrelevant_for_other_metrics():
    k = ["rouge1", "rouge2", "rougeL"]
    score_sep = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=k)
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=k)
    assert score_sep == score_no_sep
def test_single_sent_scores_dont_depend_on_newline_sep():
    pred = [
'''Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.''',
'''Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .''',
]
UpperCamelCase__ : Dict = [
'''Margot Frank, died in 1945, a month earlier than previously thought.''',
'''Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of'''
''' the final seconds on board Flight 9525.''',
]
assert calculate_rouge(__UpperCAmelCase , __UpperCAmelCase , newline_sep=__UpperCAmelCase ) == calculate_rouge(__UpperCAmelCase , __UpperCAmelCase , newline_sep=__UpperCAmelCase )
def test_rouge_lsum_benefits_from_newline_sep() -> None:
    pred = [
        '" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .<n> "it is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" '
    ]
    tgt = [
        ' Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports . Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says .'
    ]
    prev_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"], newline_sep=False)["rougeLsum"]
    new_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"])["rougeLsum"]
    assert new_score > prev_score
def test_rouge_path() -> None:
    data_dir = Path("examples/seq2seq/test_data/wmt_en_ro")
    metrics = calculate_rouge_path(data_dir.joinpath("test.source"), data_dir.joinpath("test.target"))
    assert isinstance(metrics, dict)
    metrics_no_aggregation = calculate_rouge_path(
        data_dir.joinpath("test.source"), data_dir.joinpath("test.target"), bootstrap_aggregation=False
    )
    assert isinstance(metrics_no_aggregation, dict)
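

# --- Added usage sketch (not part of the original test file) -------------------
# `calculate_rouge` aggregates with a bootstrap by default and returns one score
# per ROUGE key; `bootstrap_aggregation=False` keeps the raw per-pair scores,
# which is what `test_disaggregated_scores_are_deterministic` above relies on.
if __name__ == "__main__":
    aggregated = calculate_rouge(PRED, TGT, rouge_keys=["rouge2", "rougeL"])
    print(aggregated)  # e.g. {"rouge2": <float>, "rougeL": <float>}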
| 247 |
from typing import Optional
from torch import nn
from .transformer_2d import Transformer2DModel, Transformer2DModelOutput


class DualTransformer2DModel(nn.Module):
    def __init__(self, num_attention_heads: int = 16, attention_head_dim: int = 88, in_channels: Optional[int] = None, num_layers: int = 1, dropout: float = 0.0, norm_num_groups: int = 32, cross_attention_dim: Optional[int] = None, attention_bias: bool = False, sample_size: Optional[int] = None, num_vector_embeds: Optional[int] = None, activation_fn: str = "geglu", num_embeds_ada_norm: Optional[int] = None):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(
                    num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim, in_channels=in_channels, num_layers=num_layers, dropout=dropout, norm_num_groups=norm_num_groups, cross_attention_dim=cross_attention_dim, attention_bias=attention_bias, sample_size=sample_size, num_vector_embeds=num_vector_embeds, activation_fn=activation_fn, num_embeds_ada_norm=num_embeds_ada_norm,
                )
                for _ in range(2)
            ]
        )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(self, hidden_states, encoder_hidden_states, timestep=None, attention_mask=None, cross_attention_kwargs=None, return_dict: bool = True):
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states, encoder_hidden_states=condition_state, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, return_dict=False,
            )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return Transformer2DModelOutput(sample=output_states)
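

# --- Added illustration (not from the original module) -------------------------
# The forward pass above is a convex blend of the two transformers' residuals
# plus the input; the same arithmetic on plain tensors:
if __name__ == "__main__":
    import torch

    res_a, res_b, input_states = torch.randn(2, 4), torch.randn(2, 4), torch.randn(2, 4)
    mix_ratio = 0.5
    output_states = res_a * mix_ratio + res_b * (1 - mix_ratio) + input_states
    print(output_states.shape)  # torch.Size([2, 4])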
| 247 | 1 |
'''simple docstring'''
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
    parser = argparse.ArgumentParser()
parser.add_argument("--user", type=str, default="ubuntu")
parser.add_argument("--host", type=str, default="localhost")
parser.add_argument("--key_path", type=str, default=None)
parser.add_argument("--instance", type=str, default="V100:1")
parser.add_argument("--provider", type=str, default="cheapest")
parser.add_argument("--use_spot", type=bool, default=False)
parser.add_argument("--example", type=str, default="pytorch/text-generation/run_generation.py")
    args, unknown = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError("Cannot specify both BYO and on-demand cluster args")
        cluster = rh.cluster(
            name="rh-cluster", ips=[args.host], ssh_creds={"ssh_user": args.user, "ssh_private_key": args.key_path}
        )
else:
        cluster = rh.cluster(
            name="rh-cluster", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
        )
    example_dir = args.example.rsplit("/", 1)[0]
# Set up remote environment
cluster.install_packages(["pip:./"]) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([f'''pip install -r transformers/examples/{example_dir}/requirements.txt'''])
cluster.run(["pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([f'''python transformers/examples/{args.example} {" ".join(shlex.quote(arg) for arg in unknown)}'''])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
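
# --- Added usage note (hypothetical invocations; "launch.py" stands in for this
# script's real filename, which is not shown in the source) ----------------------
# On-demand cluster, cheapest provider:
#   python launch.py --instance V100:1 --provider cheapest \
#       --example pytorch/text-generation/run_generation.py --model_type=gpt2
# Bring-your-own cluster over SSH:
#   python launch.py --user ubuntu --host 1.2.3.4 --key_path ~/.ssh/id_rsa \
#       --example pytorch/text-generation/run_generation.py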
| 139 |
'''simple docstring'''
import math
def malus_law(initial_intensity: float, angle: float) -> float:
    """
    Intensity transmitted through a polarizer by Malus's law: I = I0 * cos^2(theta).

    >>> malus_law(100, 0)
    100.0
    >>> round(malus_law(100, 60), 2)
    25.0
    """
    if initial_intensity < 0:
        raise ValueError("The value of intensity cannot be negative")
        # handling of negative values of initial intensity
    if angle < 0 or angle > 360:
        raise ValueError("In Malus Law, the angle is in the range 0-360 degrees")
        # handling of values out of allowed range
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name="malus_law")
| 139 | 1 |
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_VERSION_REG = re.compile(r"^(?P<major>\d+)" r"\.(?P<minor>\d+)" r"\.(?P<patch>\d+)$")


@total_ordering
@dataclass
class Version:
    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None

    def __post_init__(self):
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)

    def __repr__(self):
        return f"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"

    @property
    def tuple(self):
        return self.major, self.minor, self.patch

    def _validate_operand(self, other):
        if isinstance(other, str):
            return Version(other)
        elif isinstance(other, Version):
            return other
        raise TypeError(f"{other} (type {type(other)}) cannot be compared to version.")

    def __eq__(self, other):
        try:
            other = self._validate_operand(other)
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple

    def __lt__(self, other):
        other = self._validate_operand(other)
        return self.tuple < other.tuple

    def __hash__(self):
        return hash(_version_tuple_to_str(self.tuple))

    @classmethod
    def from_dict(cls, dic):
        field_names = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})

    def _to_yaml_string(self) -> str:
        return self.version_str


def _str_to_version_tuple(version_str):
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.")
    return tuple(int(v) for v in [res.group("major"), res.group("minor"), res.group("patch")])


def _version_tuple_to_str(version_tuple):
    return ".".join(str(v) for v in version_tuple)
| 363 |
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    Text2TextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class Text2TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        generator = Text2TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return generator, ["Something to write", "Something else"]

    def run_pipeline_test(self, generator, _):
        outputs = generator("Something there")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["generated_text"].startswith("Something there"))

        outputs = generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        outputs = generator(["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        with self.assertRaises(ValueError):
            generator(4)

    @require_torch
    def test_small_model_pt(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="pt")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])

        num_return_sequences = 3
        outputs = generator("Something there", num_return_sequences=num_return_sequences, num_beams=num_return_sequences)
        target_outputs = [
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": ""},
        ]
        self.assertEqual(outputs, target_outputs)

        outputs = generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(torch.Tensor)},
                {"generated_token_ids": ANY(torch.Tensor)},
            ],
        )

        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = "<pad>"
        outputs = generator(
            ["This is a test", "This is a second test"], do_sample=True, num_return_sequences=2, batch_size=2, return_tensors=True
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="tf")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])
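
# --- Added note (not part of the original tests) ---------------------------------
# `return_tensors=True` switches the pipeline output entries from
# {"generated_text": ...} to {"generated_token_ids": ...}, which is what the
# PyTorch test above asserts with ANY(torch.Tensor).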
| 327 | 0 |
__lowerCamelCase : List[str] = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
__lowerCamelCase : Dict = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
__lowerCamelCase : int = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 52 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCamelCase : Optional[int] = {"""configuration_mmbt""": ["""MMBTConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : List[Any] = ["""MMBTForClassification""", """MMBTModel""", """ModalEmbeddings"""]
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
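
# --- Added note (not part of the original file) ---------------------------------
# `_LazyModule` makes importing this package cheap: the torch-backed modeling
# code listed in `_import_structure` is only loaded when an attribute such as
# `MMBTModel` is first accessed, and the try/except above simply skips
# registering those symbols when torch is not installed.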
| 52 | 1 |
'''simple docstring'''
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
SAMPLE_BPE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")

FRAMEWORK = "pt" if is_torch_available() else "tf"


@require_sentencepiece
@require_tokenizers
class CamembertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CamembertTokenizer
    rust_tokenizer_class = CamembertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = CamembertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>NOTUSED")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1_004)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_005)

    def test_rust_and_python_bpe_tokenizers(self):
        tokenizer = CamembertTokenizer(SAMPLE_BPE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
        rust_tokenizer = CamembertTokenizerFast.from_pretrained(self.tmpdirname)

        sequence = "I was born in 92000, and this is falsé."

        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # <unk> tokens are not the same for `rust` than for `slow`.
        # Because spm gives back raw token instead of `unk` in EncodeAsPieces
        # tokens = tokenizer.tokenize(sequence)
        tokens = tokenizer.convert_ids_to_tokens(ids)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
@slow
def _snake_case ( self ) -> Any:
# fmt: off
_UpperCAmelCase : List[str] = {"""input_ids""": [[5, 54, 7_196, 297, 30, 23, 776, 18, 11, 3_215, 3_705, 8_252, 22, 3_164, 1_181, 2_116, 29, 16, 813, 25, 791, 3_314, 20, 3_446, 38, 27_575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9_088, 20, 1_517, 8, 22_804, 18_818, 10, 38, 629, 607, 607, 142, 19, 7_196, 867, 56, 10_326, 24, 2_267, 20, 416, 5_072, 15_612, 233, 734, 7, 2_399, 27, 16, 3_015, 1_649, 7, 24, 20, 4_338, 2_399, 27, 13, 3_400, 14, 13, 6_189, 8, 930, 9, 6]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
        # camembert is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]
        # NOTE: `_UpperCAmelCase` is the expected-encoding dict assigned just above;
        # the source name is kept verbatim because that dict literal is left untouched.
        self.tokenizer_integration_test_util(
            expected_encoding=_UpperCAmelCase,
            model_name="camembert-base",
            revision="3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf",
            sequences=sequences,
        )
| 366 |
'''simple docstring'''
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_fold_dataloaders(
    accelerator: Accelerator, dataset: DatasetDict, train_idxs: List[int], valid_idxs: List[int], batch_size: int = 16
):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = DatasetDict(
        {
            "train": dataset["train"].select(train_idxs),
            "validation": dataset["train"].select(valid_idxs),
            "test": dataset["validation"],
        }
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    test_dataloader = DataLoader(
        tokenized_datasets["test"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader, test_dataloader
def training_function(config, args):
    test_references = []
    # Download the dataset
    datasets = load_dataset("glue", "mrpc")
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds))
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)

    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets["train"].num_rows), datasets["train"]["label"])
    test_predictions = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator, datasets, train_idxs, valid_idxs, batch_size,
        )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(predictions=predictions, references=references)

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            fold_predictions.append(predictions.cpu())
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu())
        # Use accelerator.print to print only on the main process.
        test_predictions.append(torch.cat(fold_predictions, dim=0))
        # We now need to release all our memory and get rid of the current model, optimizer, etc
        accelerator.free_memory()
    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references, dim=0)
    preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1)
    test_metric = metric.compute(predictions=preds, references=test_references)
    accelerator.print("Average test metrics from all folds:", test_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    # New Code #
    parser.add_argument("--num_folds", type=int, default=3, help="The number of splits to perform across the dataset")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
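
# --- Added note (not part of the original script) --------------------------------
# The ensembling at the end of `training_function` is soft voting across folds:
# per-fold test logits are summed, averaged over `num_folds`, then argmax'd, i.e.
#   preds = torch.stack(fold_logits, dim=0).sum(dim=0).div(num_folds).argmax(dim=-1)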
| 349 | 0 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class __lowerCAmelCase ( a ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = 42
_SCREAMING_SNAKE_CASE = 42
class __lowerCAmelCase ( nn.Module ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = 42
_SCREAMING_SNAKE_CASE = (16, 32, 96, 256)
_SCREAMING_SNAKE_CASE = jnp.floataa
def lowerCAmelCase__ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
snake_case_ = nn.Conv(
self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
snake_case_ = []
for i in range(len(self.block_out_channels ) - 1 ):
snake_case_ = self.block_out_channels[i]
snake_case_ = self.block_out_channels[i + 1]
snake_case_ = nn.Conv(
_lowerCAmelCase , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(_lowerCAmelCase )
snake_case_ = nn.Conv(
_lowerCAmelCase , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(_lowerCAmelCase )
snake_case_ = blocks
snake_case_ = nn.Conv(
self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self : Union[str, Any] , _lowerCAmelCase : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
snake_case_ = self.conv_in(_lowerCAmelCase )
snake_case_ = nn.silu(_lowerCAmelCase )
for block in self.blocks:
snake_case_ = block(_lowerCAmelCase )
snake_case_ = nn.silu(_lowerCAmelCase )
snake_case_ = self.conv_out(_lowerCAmelCase )
return embedding
@flax_register_to_config
class __lowerCAmelCase ( nn.Module , a , a ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = 32
_SCREAMING_SNAKE_CASE = 4
_SCREAMING_SNAKE_CASE = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = (320, 640, 1280, 1280)
_SCREAMING_SNAKE_CASE = 2
_SCREAMING_SNAKE_CASE = 8
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = 1280
_SCREAMING_SNAKE_CASE = 0.0
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = jnp.floataa
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = "rgb"
_SCREAMING_SNAKE_CASE = (16, 32, 96, 256)
def lowerCAmelCase__ ( self : str , _lowerCAmelCase : jax.random.KeyArray ) -> FrozenDict:
"""simple docstring"""
# init input tensors
snake_case_ = (1, self.in_channels, self.sample_size, self.sample_size)
snake_case_ = jnp.zeros(_lowerCAmelCase , dtype=jnp.floataa )
snake_case_ = jnp.ones((1,) , dtype=jnp.intaa )
snake_case_ = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
snake_case_ = (1, 3, self.sample_size * 8, self.sample_size * 8)
snake_case_ = jnp.zeros(_lowerCAmelCase , dtype=jnp.floataa )
snake_case_ , snake_case_ = jax.random.split(_lowerCAmelCase )
snake_case_ = {"params": params_rng, "dropout": dropout_rng}
return self.init(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )["params"]
def lowerCAmelCase__ ( self : List[Any] ) -> Any:
"""simple docstring"""
snake_case_ = self.block_out_channels
snake_case_ = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
snake_case_ = self.num_attention_heads or self.attention_head_dim
# input
snake_case_ = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
snake_case_ = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
snake_case_ = FlaxTimestepEmbedding(_lowerCAmelCase , dtype=self.dtype )
snake_case_ = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
snake_case_ = self.only_cross_attention
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
snake_case_ = (only_cross_attention,) * len(self.down_block_types )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
snake_case_ = (num_attention_heads,) * len(self.down_block_types )
# down
snake_case_ = []
snake_case_ = []
snake_case_ = block_out_channels[0]
snake_case_ = nn.Conv(
_lowerCAmelCase , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(_lowerCAmelCase )
for i, down_block_type in enumerate(self.down_block_types ):
snake_case_ = output_channel
snake_case_ = block_out_channels[i]
snake_case_ = i == len(_lowerCAmelCase ) - 1
if down_block_type == "CrossAttnDownBlock2D":
snake_case_ = FlaxCrossAttnDownBlockaD(
in_channels=_lowerCAmelCase , out_channels=_lowerCAmelCase , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
else:
snake_case_ = FlaxDownBlockaD(
in_channels=_lowerCAmelCase , out_channels=_lowerCAmelCase , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(_lowerCAmelCase )
for _ in range(self.layers_per_block ):
snake_case_ = nn.Conv(
_lowerCAmelCase , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(_lowerCAmelCase )
if not is_final_block:
snake_case_ = nn.Conv(
_lowerCAmelCase , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(_lowerCAmelCase )
snake_case_ = down_blocks
snake_case_ = controlnet_down_blocks
# mid
snake_case_ = block_out_channels[-1]
snake_case_ = FlaxUNetMidBlockaDCrossAttn(
in_channels=_lowerCAmelCase , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
snake_case_ = nn.Conv(
_lowerCAmelCase , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self : Any , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : float = 1.0 , _lowerCAmelCase : bool = True , _lowerCAmelCase : bool = False , ) -> Union[FlaxControlNetOutput, Tuple]:
"""simple docstring"""
snake_case_ = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
snake_case_ = jnp.flip(_lowerCAmelCase , axis=1 )
# 1. time
if not isinstance(_lowerCAmelCase , jnp.ndarray ):
snake_case_ = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(_lowerCAmelCase , jnp.ndarray ) and len(timesteps.shape ) == 0:
snake_case_ = timesteps.astype(dtype=jnp.floataa )
snake_case_ = jnp.expand_dims(_lowerCAmelCase , 0 )
snake_case_ = self.time_proj(_lowerCAmelCase )
snake_case_ = self.time_embedding(_lowerCAmelCase )
# 2. pre-process
snake_case_ = jnp.transpose(_lowerCAmelCase , (0, 2, 3, 1) )
snake_case_ = self.conv_in(_lowerCAmelCase )
snake_case_ = jnp.transpose(_lowerCAmelCase , (0, 2, 3, 1) )
snake_case_ = self.controlnet_cond_embedding(_lowerCAmelCase )
sample += controlnet_cond
# 3. down
snake_case_ = (sample,)
for down_block in self.down_blocks:
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
snake_case_ , snake_case_ = down_block(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , deterministic=not train )
else:
snake_case_ , snake_case_ = down_block(_lowerCAmelCase , _lowerCAmelCase , deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
snake_case_ = self.mid_block(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , deterministic=not train )
# 5. contronet blocks
snake_case_ = ()
for down_block_res_sample, controlnet_block in zip(_lowerCAmelCase , self.controlnet_down_blocks ):
snake_case_ = controlnet_block(_lowerCAmelCase )
controlnet_down_block_res_samples += (down_block_res_sample,)
snake_case_ = controlnet_down_block_res_samples
snake_case_ = self.controlnet_mid_block(_lowerCAmelCase )
# 6. scaling
snake_case_ = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=_lowerCAmelCase , mid_block_res_sample=_lowerCAmelCase )
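
# --- Added note (not part of the original file) ----------------------------------
# `conditioning_scale` in `__call__` linearly scales every residual the
# ControlNet hands back to the UNet (both the down-block samples and the
# mid-block sample), so a scale of 0.0 disables the control signal entirely.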
| 159 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_canine''': ['''CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CanineConfig'''],
'''tokenization_canine''': ['''CanineTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_canine"] = [
'''CANINE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CanineForMultipleChoice''',
'''CanineForQuestionAnswering''',
'''CanineForSequenceClassification''',
'''CanineForTokenClassification''',
'''CanineLayer''',
'''CanineModel''',
'''CaninePreTrainedModel''',
'''load_tf_weights_in_canine''',
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 159 | 1 |
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    """
    Transform a snake_case string to camelCase (or PascalCase if `use_pascal` is set).

    >>> snake_to_camel_case("some_random_string")
    'someRandomString'
    >>> snake_to_camel_case("some_random_string", use_pascal=True)
    'SomeRandomString'
    """
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)

    words = input_str.split("_")

    start_index = 0 if use_pascal else 1

    words_to_capitalize = words[start_index:]

    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]

    initial_word = "" if use_pascal else words[0]

    return "".join([initial_word, *capitalized_words])
if __name__ == "__main__":
from doctest import testmod
testmod()
| 290 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_deberta": ["DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaConfig", "DebertaOnnxConfig"],
"tokenization_deberta": ["DebertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["DebertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deberta"] = [
"DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"DebertaForMaskedLM",
"DebertaForQuestionAnswering",
"DebertaForSequenceClassification",
"DebertaForTokenClassification",
"DebertaModel",
"DebertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deberta"] = [
"TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDebertaForMaskedLM",
"TFDebertaForQuestionAnswering",
"TFDebertaForSequenceClassification",
"TFDebertaForTokenClassification",
"TFDebertaModel",
"TFDebertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 290 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_mobilebert''': [
'''MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''MobileBertConfig''',
'''MobileBertOnnxConfig''',
],
'''tokenization_mobilebert''': ['''MobileBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mobilebert_fast"] = ["MobileBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilebert"] = [
'''MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileBertForMaskedLM''',
'''MobileBertForMultipleChoice''',
'''MobileBertForNextSentencePrediction''',
'''MobileBertForPreTraining''',
'''MobileBertForQuestionAnswering''',
'''MobileBertForSequenceClassification''',
'''MobileBertForTokenClassification''',
'''MobileBertLayer''',
'''MobileBertModel''',
'''MobileBertPreTrainedModel''',
'''load_tf_weights_in_mobilebert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilebert"] = [
'''TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFMobileBertForMaskedLM''',
'''TFMobileBertForMultipleChoice''',
'''TFMobileBertForNextSentencePrediction''',
'''TFMobileBertForPreTraining''',
'''TFMobileBertForQuestionAnswering''',
'''TFMobileBertForSequenceClassification''',
'''TFMobileBertForTokenClassification''',
'''TFMobileBertMainLayer''',
'''TFMobileBertModel''',
'''TFMobileBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 249 |
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
T = TypeVar("T")


class SegmentTree(Generic[T]):
    def __init__(self, arr: list[T], fnc: Callable[[T, T], T]) -> None:
        any_type: Any | T = None

        self.N: int = len(arr)
        self.st: list[T] = [any_type for _ in range(self.N)] + arr
        self.fn = fnc
        self.build()

    def build(self) -> None:
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def update(self, p: int, v: T) -> None:
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def query(self, l: int, r: int) -> T | None:  # noqa: E741
        l, r = l + self.N, r + self.N
        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res


if __name__ == "__main__":
    from functools import reduce

    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]

    test_updates = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }

    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)

    def test_all_segments() -> None:
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)

    test_all_segments()

    for index, value in test_updates.items():
        test_array[index] = value
        min_segment_tree.update(index, value)
        max_segment_tree.update(index, value)
        sum_segment_tree.update(index, value)
    test_all_segments()
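
# --- Added note (not part of the original file) ----------------------------------
# The tree is stored implicitly in a flat array of size 2 * N: leaf i lives at
# index N + i and node p's children sit at 2 * p and 2 * p + 1, which is why
# both `update` and `query` touch only O(log N) nodes.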
| 138 | 0 |
'''simple docstring'''
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

TPosition = tuple[int, int]
class Node:
    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: int,
        parent: Node | None,
    ):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other: Node) -> bool:
        return self.f_cost < other.f_cost
class AStar:
    def __init__(self, start: TPosition, goal: TPosition):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []

        self.reached = False

    def search(self) -> list[TPosition]:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        return [self.start.pos]

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]

            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent.g_cost + 1, parent)
            )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalAStar:
    def __init__(self, start: TPosition, goal: TPosition):
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)

            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node

            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }

            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue

                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(astar.open_nodes.index(child_node))

                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)

        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"AStar execution time = {end_time:f} seconds")

    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    bd_path = bidir_astar.search()  # search call added so the timing below measures an actual run
    bd_end_time = time.time() - bd_start_time
    print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
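
# --- Added note (not part of the original file) ----------------------------------
# HEURISTIC = 1 selects the Manhattan distance |dx| + |dy|; any other value
# selects the Euclidean distance sqrt(dx**2 + dy**2). With 4-directional moves
# of unit cost, the Manhattan distance is the tighter admissible heuristic.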
| 365 |
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 294 | 0 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
    from transformers import AutoProcessor, Blip2Processor, BlipImageProcessor, GPT2Tokenizer, PreTrainedTokenizerFast
@require_vision
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE_( self ) -> int:
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = BlipImageProcessor()
lowerCamelCase_ = GPTaTokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model" )
lowerCamelCase_ = BlipaProcessor(lowercase , lowercase )
processor.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE_( self , **lowercase ) -> Union[str, Any]:
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase ).tokenizer
def SCREAMING_SNAKE_CASE_( self , **lowercase ) -> str:
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase ).image_processor
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE_( self ) -> List[Any]:
lowerCamelCase_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowerCamelCase_ = [Image.fromarray(np.moveaxis(lowercase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
lowerCamelCase_ = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase_ = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
lowerCamelCase_ = self.get_image_processor(do_normalize=lowercase , padding_value=1.0 )
lowerCamelCase_ = BlipaProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=lowercase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowercase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> str:
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = BlipaProcessor(tokenizer=lowercase , image_processor=lowercase )
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = image_processor(lowercase , return_tensors="np" )
lowerCamelCase_ = processor(images=lowercase , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = BlipaProcessor(tokenizer=lowercase , image_processor=lowercase )
lowerCamelCase_ = "lower newer"
lowerCamelCase_ = processor(text=lowercase )
lowerCamelCase_ = tokenizer(lowercase , return_token_type_ids=lowercase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = BlipaProcessor(tokenizer=lowercase , image_processor=lowercase )
lowerCamelCase_ = "lower newer"
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = processor(text=lowercase , images=lowercase )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
# test if it raises when no input is passed
with pytest.raises(lowercase ):
processor()
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = BlipaProcessor(tokenizer=lowercase , image_processor=lowercase )
lowerCamelCase_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCamelCase_ = processor.batch_decode(lowercase )
lowerCamelCase_ = tokenizer.batch_decode(lowercase )
self.assertListEqual(lowercase , lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = BlipaProcessor(tokenizer=lowercase , image_processor=lowercase )
lowerCamelCase_ = "lower newer"
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = processor(text=lowercase , images=lowercase )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
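# Standalone round-trip with the same components the tests assemble (a sketch;
# it downloads the tiny test tokenizer, so it needs network access once):
import numpy as np
from PIL import Image
from transformers import Blip2Processor, BlipImageProcessor, GPT2Tokenizer

tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")
processor = Blip2Processor(BlipImageProcessor(), tokenizer)
image = Image.fromarray(np.zeros((30, 400, 3), dtype=np.uint8))
batch = processor(text="lower newer", images=image, return_tensors="np")
print(sorted(batch.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']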
| 19 |
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('''3.7'''):
raise ImportWarning(
'''To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'''
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'''To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'''
'''If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'''
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
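# Example of the caching toggles re-exported above (an illustrative sketch,
# not part of the package __init__ itself):
from datasets import disable_caching, enable_caching, is_caching_enabled

disable_caching()
assert not is_caching_enabled()
enable_caching()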
| 19 | 1 |
from typing import List, Optional, Union

import numpy as np

from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging


logger = logging.get_logger(__name__)


class MCTCTFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16_000,
        padding_value=0.0,
        hop_length=10,
        win_length=25,
        win_function="hamming_window",
        frame_signal_scale=32_768.0,
        preemphasis_coeff=0.97,
        mel_floor=1.0,
        normalize_means=True,
        normalize_vars=True,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)

        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.hop_length = hop_length
        self.win_length = win_length
        self.frame_signal_scale = frame_signal_scale
        self.preemphasis_coeff = preemphasis_coeff
        self.mel_floor = mel_floor
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.win_function = win_function
        self.return_attention_mask = return_attention_mask

        self.sample_size = win_length * sampling_rate // 1_000
        self.sample_stride = hop_length * sampling_rate // 1_000

        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

    def _extract_mfsc_features(self, one_waveform: np.array) -> np.ndarray:
        """Extract log-mel filterbank features for one waveform vector (unbatched)."""
        if self.win_function == "hamming_window":
            window = window_function(window_length=self.sample_size, name=self.win_function, periodic=False)
        else:
            window = window_function(window_length=self.sample_size, name=self.win_function)

        fbanks = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.feature_size,
            min_frequency=0.0,
            max_frequency=self.sampling_rate / 2.0,
            sampling_rate=self.sampling_rate,
        )

        msfc_features = spectrogram(
            one_waveform * self.frame_signal_scale,
            window=window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            center=False,
            preemphasis=self.preemphasis_coeff,
            mel_filters=fbanks,
            mel_floor=self.mel_floor,
            log_mel="log",
        )
        return msfc_features.T

    def _normalize_one(self, x, input_length, padding_value):
        if self.normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if self.normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)

        return x

    def normalize(
        self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None
    ) -> List[np.ndarray]:
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(x, n, self.padding_value) for x, n in zip(input_features, lengths)]

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_mfsc_features(one_waveform) for one_waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        if self.normalize_means or self.normalize_vars:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
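# Illustrative use of the extractor above (a sketch; the parameter values mirror
# the defaults and the one-second waveform is synthetic):
import numpy as np

fe = MCTCTFeatureExtractor(feature_size=80, sampling_rate=16_000, padding_value=0.0)
waveform = np.random.randn(16_000).astype(np.float32)  # 1 s of fake mono audio
batch = fe(waveform, sampling_rate=16_000, padding=True, return_tensors="np")
print(batch["input_features"].shape)  # roughly (1, ~100 frames, 80) with 10 ms hops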
| 367 |
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor


@require_vision
class ChineseCLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "的",
            "价",
            "格",
            "是",
            "15",
            "便",
            "alex",
            "##andra",
            ",",
            "。",
            "-",
            "t",
            "shirt",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 224, "width": 224},
            "do_center_crop": True,
            "crop_size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
            "do_convert_rgb": True,
        }
        self.image_processor_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Creates a list of 3-channel random uint8 images to feed the processor."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = ChineseCLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = ChineseCLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = ChineseCLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = ChineseCLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ChineseCLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ChineseCLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = ChineseCLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(cls_token="(CLS)", sep_token="(SEP)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname, cls_token="(CLS)", sep_token="(SEP)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ChineseCLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
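# Standalone round-trip mirroring the tests above (a sketch; "bert-base-chinese"
# is a public checkpoint, so this needs network access once):
import numpy as np
from PIL import Image
from transformers import BertTokenizer, ChineseCLIPImageProcessor, ChineseCLIPProcessor

tokenizer = BertTokenizer.from_pretrained("bert-base-chinese")
processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=ChineseCLIPImageProcessor())
image = Image.fromarray(np.zeros((30, 400, 3), dtype=np.uint8))
batch = processor(text="一张猫的照片", images=image, return_tensors="np")
print(sorted(batch.keys()))  # ['attention_mask', 'input_ids', 'pixel_values', 'token_type_ids']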
| 206 | 0 |
'''simple docstring'''
from collections import deque
from math import floor
from random import random
from time import time
class DirectedGraph:
    def __init__(self):
        # adjacency map: node -> list of [weight, neighbour] pairs
        self.graph = {}
    def add_pair(self, u, v, w=1):
        # add the weighted edge u -> v, avoiding duplicates
        if self.graph.get(u):
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            self.graph[u] = [[w, v]]
        if not self.graph.get(v):
            self.graph[v] = []
    def all_nodes(self):
        return list(self.graph)
def UpperCamelCase_ ( self : List[str] ,lowerCamelCase__ : Optional[int] ,lowerCamelCase__ : int ):
'''simple docstring'''
if self.graph.get(__A ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(__A )
def UpperCamelCase_ ( self : Optional[Any] ,lowerCamelCase__ : str=-2 ,lowerCamelCase__ : Dict=-1 ):
'''simple docstring'''
if s == d:
return []
_UpperCamelCase : Dict = []
_UpperCamelCase : Any = []
if s == -2:
_UpperCamelCase : Optional[Any] = list(self.graph )[0]
stack.append(__A )
visited.append(__A )
_UpperCamelCase : Tuple = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_UpperCamelCase : Any = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(__A )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
_UpperCamelCase : Optional[Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(__A ) != 0:
_UpperCamelCase : List[Any] = stack[len(__A ) - 1]
else:
_UpperCamelCase : Tuple = ss
# check if se have reached the starting point
if len(__A ) == 0:
return visited
def UpperCamelCase_ ( self : Any ,lowerCamelCase__ : Optional[Any]=-1 ):
'''simple docstring'''
if c == -1:
_UpperCamelCase : List[Any] = floor(random() * 10000 ) + 10
for i in range(__A ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
_UpperCamelCase : int = floor(random() * c ) + 1
if n != i:
self.add_pair(__A ,__A ,1 )
def UpperCamelCase_ ( self : List[Any] ,lowerCamelCase__ : Any=-2 ):
'''simple docstring'''
_UpperCamelCase : List[Any] = deque()
_UpperCamelCase : Dict = []
if s == -2:
_UpperCamelCase : int = list(self.graph )[0]
d.append(__A )
visited.append(__A )
while d:
_UpperCamelCase : List[Any] = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def UpperCamelCase_ ( self : Any ,lowerCamelCase__ : List[Any] ):
'''simple docstring'''
_UpperCamelCase : Any = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def UpperCamelCase_ ( self : Any ,lowerCamelCase__ : Tuple ):
'''simple docstring'''
return len(self.graph[u] )
def UpperCamelCase_ ( self : Union[str, Any] ,lowerCamelCase__ : Union[str, Any]=-2 ):
'''simple docstring'''
_UpperCamelCase : Optional[int] = []
_UpperCamelCase : Optional[Any] = []
if s == -2:
_UpperCamelCase : List[Any] = list(self.graph )[0]
stack.append(__A )
visited.append(__A )
_UpperCamelCase : Dict = s
_UpperCamelCase : Optional[Any] = []
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_UpperCamelCase : Dict = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_UpperCamelCase : Union[str, Any] = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(__A ) != 0:
_UpperCamelCase : Optional[int] = stack[len(__A ) - 1]
else:
_UpperCamelCase : List[str] = ss
# check if se have reached the starting point
if len(__A ) == 0:
return sorted_nodes
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
_UpperCamelCase : Optional[Any] = []
_UpperCamelCase : List[str] = []
_UpperCamelCase : Optional[int] = list(self.graph )[0]
stack.append(__A )
visited.append(__A )
_UpperCamelCase : Any = -2
_UpperCamelCase : List[str] = []
_UpperCamelCase : List[str] = s
_UpperCamelCase : int = False
_UpperCamelCase : Optional[int] = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_UpperCamelCase : Optional[Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_UpperCamelCase : Optional[Any] = len(__A ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_UpperCamelCase : Optional[Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_UpperCamelCase : int = True
if len(__A ) != 0:
_UpperCamelCase : Tuple = stack[len(__A ) - 1]
else:
_UpperCamelCase : int = False
indirect_parents.append(__A )
_UpperCamelCase : str = s
_UpperCamelCase : Any = ss
# check if se have reached the starting point
if len(__A ) == 0:
return list(__A )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_UpperCamelCase : Tuple = []
_UpperCamelCase : Optional[int] = []
_UpperCamelCase : List[str] = list(self.graph )[0]
stack.append(__A )
visited.append(__A )
_UpperCamelCase : Optional[Any] = -2
_UpperCamelCase : Optional[Any] = []
_UpperCamelCase : Tuple = s
_UpperCamelCase : Optional[int] = False
_UpperCamelCase : Optional[int] = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_UpperCamelCase : Optional[int] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_UpperCamelCase : List[Any] = len(__A ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_UpperCamelCase : Optional[int] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_UpperCamelCase : List[Any] = True
if len(__A ) != 0:
_UpperCamelCase : Union[str, Any] = stack[len(__A ) - 1]
else:
_UpperCamelCase : Optional[int] = False
indirect_parents.append(__A )
_UpperCamelCase : Optional[int] = s
_UpperCamelCase : Optional[int] = ss
# check if se have reached the starting point
if len(__A ) == 0:
return False
def UpperCamelCase_ ( self : Dict ,lowerCamelCase__ : Union[str, Any]=-2 ,lowerCamelCase__ : Optional[int]=-1 ):
'''simple docstring'''
_UpperCamelCase : Tuple = time()
self.dfs(__A ,__A )
_UpperCamelCase : Optional[int] = time()
return end - begin
def UpperCamelCase_ ( self : Dict ,lowerCamelCase__ : Dict=-2 ):
'''simple docstring'''
_UpperCamelCase : List[Any] = time()
self.bfs(__A )
_UpperCamelCase : Optional[Any] = time()
return end - begin
class Graph:
    def __init__(self):
        # undirected adjacency map: node -> list of [weight, neighbour] pairs
        self.graph = {}
    def add_pair(self, u, v, w=1):
        # check if u exists
        if self.graph.get(u):
            # if there already is an edge
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            # if u does not exist
            self.graph[u] = [[w, v]]
        # add the other way
        if self.graph.get(v):
            # if there already is an edge
            if self.graph[v].count([w, u]) == 0:
                self.graph[v].append([w, u])
        else:
            # if v does not exist
            self.graph[v] = [[w, u]]
def UpperCamelCase_ ( self : Optional[Any] ,lowerCamelCase__ : Tuple ,lowerCamelCase__ : Tuple ):
'''simple docstring'''
if self.graph.get(__A ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(__A )
# the other way round
if self.graph.get(__A ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(__A )
def UpperCamelCase_ ( self : Optional[Any] ,lowerCamelCase__ : Optional[Any]=-2 ,lowerCamelCase__ : int=-1 ):
'''simple docstring'''
if s == d:
return []
_UpperCamelCase : List[str] = []
_UpperCamelCase : Union[str, Any] = []
if s == -2:
_UpperCamelCase : Any = list(self.graph )[0]
stack.append(__A )
visited.append(__A )
_UpperCamelCase : int = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_UpperCamelCase : Optional[int] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(__A )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
_UpperCamelCase : Optional[Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(__A ) != 0:
_UpperCamelCase : Dict = stack[len(__A ) - 1]
else:
_UpperCamelCase : Any = ss
# check if se have reached the starting point
if len(__A ) == 0:
return visited
def UpperCamelCase_ ( self : Optional[int] ,lowerCamelCase__ : Tuple=-1 ):
'''simple docstring'''
if c == -1:
_UpperCamelCase : str = floor(random() * 10000 ) + 10
for i in range(__A ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
_UpperCamelCase : Dict = floor(random() * c ) + 1
if n != i:
self.add_pair(__A ,__A ,1 )
def UpperCamelCase_ ( self : List[Any] ,lowerCamelCase__ : Dict=-2 ):
'''simple docstring'''
_UpperCamelCase : Any = deque()
_UpperCamelCase : Tuple = []
if s == -2:
_UpperCamelCase : Tuple = list(self.graph )[0]
d.append(__A )
visited.append(__A )
while d:
_UpperCamelCase : int = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def UpperCamelCase_ ( self : str ,lowerCamelCase__ : List[str] ):
'''simple docstring'''
return len(self.graph[u] )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_UpperCamelCase : int = []
_UpperCamelCase : List[Any] = []
_UpperCamelCase : List[str] = list(self.graph )[0]
stack.append(__A )
visited.append(__A )
_UpperCamelCase : Union[str, Any] = -2
_UpperCamelCase : Optional[Any] = []
_UpperCamelCase : str = s
_UpperCamelCase : str = False
_UpperCamelCase : Optional[Any] = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_UpperCamelCase : Tuple = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_UpperCamelCase : List[str] = len(__A ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_UpperCamelCase : Optional[Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_UpperCamelCase : str = True
if len(__A ) != 0:
_UpperCamelCase : List[str] = stack[len(__A ) - 1]
else:
_UpperCamelCase : Optional[int] = False
indirect_parents.append(__A )
_UpperCamelCase : Tuple = s
_UpperCamelCase : Optional[Any] = ss
# check if se have reached the starting point
if len(__A ) == 0:
return list(__A )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_UpperCamelCase : int = []
_UpperCamelCase : Optional[Any] = []
_UpperCamelCase : str = list(self.graph )[0]
stack.append(__A )
visited.append(__A )
_UpperCamelCase : Optional[Any] = -2
_UpperCamelCase : int = []
_UpperCamelCase : Union[str, Any] = s
_UpperCamelCase : Union[str, Any] = False
_UpperCamelCase : List[str] = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_UpperCamelCase : str = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_UpperCamelCase : Any = len(__A ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_UpperCamelCase : Optional[Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_UpperCamelCase : int = True
if len(__A ) != 0:
_UpperCamelCase : List[Any] = stack[len(__A ) - 1]
else:
_UpperCamelCase : Optional[Any] = False
indirect_parents.append(__A )
_UpperCamelCase : List[Any] = s
_UpperCamelCase : str = ss
# check if se have reached the starting point
if len(__A ) == 0:
return False
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
return list(self.graph )
def UpperCamelCase_ ( self : Optional[int] ,lowerCamelCase__ : Union[str, Any]=-2 ,lowerCamelCase__ : int=-1 ):
'''simple docstring'''
_UpperCamelCase : Optional[Any] = time()
self.dfs(__A ,__A )
_UpperCamelCase : Optional[int] = time()
return end - begin
def UpperCamelCase_ ( self : Dict ,lowerCamelCase__ : Tuple=-2 ):
'''simple docstring'''
_UpperCamelCase : Optional[Any] = time()
self.bfs(__A )
_UpperCamelCase : Optional[int] = time()
return end - begin
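# Independent sanity check of the cycle idea used above (an illustrative sketch):
# a back edge found by a colour-marking DFS on a plain adjacency dict means a cycle.
def has_cycle_directed(adj: dict) -> bool:
    WHITE, GRAY, BLACK = 0, 1, 2
    colour = {u: WHITE for u in adj}

    def visit(u):
        colour[u] = GRAY
        for v in adj.get(u, []):
            if colour.get(v, WHITE) == GRAY:
                return True  # back edge: v is still on the current DFS path
            if colour.get(v, WHITE) == WHITE and visit(v):
                return True
        colour[u] = BLACK
        return False

    return any(colour[u] == WHITE and visit(u) for u in adj)


assert has_cycle_directed({1: [2], 2: [3], 3: [1]}) is True
assert has_cycle_directed({1: [2], 2: [3], 3: []}) is False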
| 83 |
"""simple docstring"""
from math import factorial
def _A ( lowercase = 1_00 ):
"""simple docstring"""
return sum(int(lowercase ) for x in str(factorial(lowercase ) ) )
if __name__ == "__main__":
print(solution(int(input("""Enter the Number: """).strip()))) | 81 | 0 |
"""simple docstring"""
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
SCREAMING_SNAKE_CASE__:Any = {
"""E""": 12.70,
"""T""": 9.06,
"""A""": 8.17,
"""O""": 7.51,
"""I""": 6.97,
"""N""": 6.75,
"""S""": 6.33,
"""H""": 6.09,
"""R""": 5.99,
"""D""": 4.25,
"""L""": 4.03,
"""C""": 2.78,
"""U""": 2.76,
"""M""": 2.41,
"""W""": 2.36,
"""F""": 2.23,
"""G""": 2.02,
"""Y""": 1.97,
"""P""": 1.93,
"""B""": 1.29,
"""V""": 0.98,
"""K""": 0.77,
"""J""": 0.15,
"""X""": 0.15,
"""Q""": 0.10,
"""Z""": 0.07,
}
SCREAMING_SNAKE_CASE__:Optional[int] = """ETAOINSHRDLCUMWFGYPBVKJXQZ"""
SCREAMING_SNAKE_CASE__:Tuple = """ABCDEFGHIJKLMNOPQRSTUVWXYZ"""
def _lowerCamelCase( a ):
__a = {letter: 0 for letter in string.ascii_uppercase}
for letter in message.upper():
if letter in LETTERS:
letter_count[letter] += 1
return letter_count
def _lowerCamelCase( a ):
return x[0]
def _lowerCamelCase( a ):
__a = get_letter_count(a )
__a = {
freq: [] for letter, freq in letter_to_freq.items()
}
for letter in LETTERS:
freq_to_letter[letter_to_freq[letter]].append(a )
__a = {}
for freq in freq_to_letter:
freq_to_letter[freq].sort(key=ETAOIN.find , reverse=a )
__a = "".join(freq_to_letter[freq] )
__a = list(freq_to_letter_str.items() )
freq_pairs.sort(key=a , reverse=a )
__a = [freq_pair[1] for freq_pair in freq_pairs]
return "".join(a )
def _lowerCamelCase( a ):
__a = get_frequency_order(a )
__a = 0
for common_letter in ETAOIN[:6]:
if common_letter in freq_order[:6]:
match_score += 1
for uncommon_letter in ETAOIN[-6:]:
if uncommon_letter in freq_order[-6:]:
match_score += 1
return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
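# Worked example (an illustrative sketch, not part of the original module):
# ordinary English prose pushes common letters like E/T/A toward the front of
# the recovered frequency order, so it scores highly.
sample = "Pack my box with five dozen liquor jugs. The quick brown fox jumps over the lazy dog."
print(get_frequency_order(sample)[:6])   # typically letters drawn from "ETAOIN"
print(english_freq_match_score(sample))  # 0-12; English text usually scores high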
| 268 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_camembert import CamembertTokenizer
else:
    CamembertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    },
    "tokenizer_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"


class CamembertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CamembertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
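# Sequence layouts produced by the methods above (an illustrative sketch):
#   single pair:  <s> tokens_0 </s>
#   text pair:    <s> tokens_0 </s></s> tokens_1 </s>
# and, unlike BERT, CamemBERT uses all-zero token type ids for both segments.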
| 268 | 1 |
'''simple docstring'''
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = "▁"
lowercase__ = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}
lowercase__ = {
"sentencepiece_model_file": "sentencepiece.bpe.model",
"vocab_file": "vocab.txt",
}
lowercase__ = {
"vocab_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
},
"sentencepiece_model_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
},
}
lowercase__ = {
"ernie-m-base": 514,
"ernie-m-large": 514,
}
lowercase__ = {
"ernie-m-base": {"do_lower_case": False},
"ernie-m-large": {"do_lower_case": False},
}
class ErnieMTokenizer(PreTrainedTokenizer):
    """Constructs an ERNIE-M tokenizer backed by a SentencePiece model."""

    model_input_names: List[str] = ["input_ids"]
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    resource_files_names = RESOURCE_FILES_NAMES

    def __init__(
        self,
        sentencepiece_model_ckpt,
        vocab_file=None,
        do_lower_case=False,
        encoding="utf8",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            vocab_file=vocab_file,
            encoding=encoding,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(sentencepiece_model_ckpt)

        # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
        if vocab_file is not None:
            self.vocab = self.load_vocab(filepath=vocab_file)
        else:
            self.vocab = {self.sp_model.id_to_piece(id): id for id in range(self.sp_model.get_piece_size())}
        self.reverse_vocab = {v: k for k, v in self.vocab.items()}
def UpperCAmelCase_ ( self : Optional[int] , lowercase_ : Any ) -> List[Any]:
if text is None:
return None
UpperCAmelCase : Dict = self.tokenize(snake_case_ )
UpperCAmelCase , UpperCAmelCase : Union[str, Any] = '', []
for i, ch in enumerate(snake_case_ ):
if ch in self.SP_CHAR_MAPPING:
UpperCAmelCase : Tuple = self.SP_CHAR_MAPPING.get(snake_case_ )
else:
UpperCAmelCase : Optional[int] = unicodedata.normalize('NFKC' , snake_case_ )
if self.is_whitespace(snake_case_ ):
continue
normalized_text += ch
char_mapping.extend([i] * len(snake_case_ ) )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[str] = normalized_text, [], 0
if self.do_lower_case:
UpperCAmelCase : Any = text.lower()
for token in split_tokens:
if token[:1] == "▁":
UpperCAmelCase : Union[str, Any] = token[1:]
UpperCAmelCase : Dict = text[offset:].index(snake_case_ ) + offset
UpperCAmelCase : Tuple = start + len(snake_case_ )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
UpperCAmelCase : List[Any] = end
return token_mapping
@property
def UpperCAmelCase_ ( self : List[Any] ) -> Optional[Any]:
return len(self.vocab )
def UpperCAmelCase_ ( self : Dict ) -> Optional[Any]:
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self : Optional[int] ) -> Dict:
UpperCAmelCase : Dict = self.__dict__.copy()
UpperCAmelCase : Optional[Any] = None
return state
def __setstate__( self : Optional[Any] , lowercase_ : Union[str, Any] ) -> Optional[Any]:
UpperCAmelCase : List[str] = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
UpperCAmelCase : List[str] = {}
UpperCAmelCase : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def UpperCAmelCase_ ( self : List[str] , lowercase_ : List[str] ) -> Tuple:
return "".join((self.SP_CHAR_MAPPING.get(snake_case_ , snake_case_ ) for c in text) )
def UpperCAmelCase_ ( self : Dict , lowercase_ : Tuple , lowercase_ : Union[str, Any]=False , lowercase_ : Optional[Any]=64 , lowercase_ : int=0.1 ) -> Dict:
if self.sp_model_kwargs.get('enable_sampling' ) is True:
UpperCAmelCase : Tuple = True
if self.sp_model_kwargs.get('alpha' ) is not None:
UpperCAmelCase : Optional[int] = self.sp_model_kwargs.get('alpha' )
if self.sp_model_kwargs.get('nbest_size' ) is not None:
UpperCAmelCase : int = self.sp_model_kwargs.get('nbest_size' )
if not enable_sampling:
UpperCAmelCase : Union[str, Any] = self.sp_model.EncodeAsPieces(snake_case_ )
else:
UpperCAmelCase : Any = self.sp_model.SampleEncodeAsPieces(snake_case_ , snake_case_ , snake_case_ )
UpperCAmelCase : Any = []
for pi, piece in enumerate(snake_case_ ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(snake_case_ ) and pi != 0:
new_pieces.append(snake_case_ )
continue
else:
continue
UpperCAmelCase : int = 0
for i, chunk in enumerate(snake_case_ ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(snake_case_ ) or self.is_punct(snake_case_ ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(snake_case_ )
UpperCAmelCase : Tuple = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
UpperCAmelCase : Dict = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
UpperCAmelCase : List[Any] = i
if len(snake_case_ ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
def UpperCAmelCase_ ( self : List[str] , lowercase_ : Optional[int] ) -> str:
UpperCAmelCase : Any = ''.join(snake_case_ ).replace(snake_case_ , ' ' ).strip()
return out_string
def UpperCAmelCase_ ( self : List[str] , lowercase_ : Optional[int] ) -> Dict:
UpperCAmelCase : Union[str, Any] = self.convert_ids_to_tokens(snake_case_ )
UpperCAmelCase : Union[str, Any] = ''.join(snake_case_ ).replace(snake_case_ , ' ' ).strip()
return out_string
def UpperCAmelCase_ ( self : List[str] , lowercase_ : Any ) -> Dict:
return self.vocab.get(snake_case_ , self.vocab.get(self.unk_token ) )
def UpperCAmelCase_ ( self : Union[str, Any] , lowercase_ : List[Any] ) -> Union[str, Any]:
return self.reverse_vocab.get(snake_case_ , self.unk_token )
def UpperCAmelCase_ ( self : int , lowercase_ : Any , lowercase_ : List[Any]=None ) -> Dict:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase : Tuple = [self.cls_token_id]
UpperCAmelCase : Dict = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def UpperCAmelCase_ ( self : Optional[Any] , lowercase_ : int , lowercase_ : int=None ) -> str:
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def UpperCAmelCase_ ( self : Tuple , lowercase_ : List[Any] , lowercase_ : Union[str, Any]=None , lowercase_ : Optional[Any]=False ) -> Optional[Any]:
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(snake_case_ )) + [1, 1] + ([0] * len(snake_case_ )) + [1]
return [1] + ([0] * len(snake_case_ )) + [1]
def UpperCAmelCase_ ( self : str , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
# [CLS] X [SEP]
return (len(snake_case_ ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(snake_case_ ) + 1) + [1] * (len(snake_case_ ) + 3)
def UpperCAmelCase_ ( self : List[str] , lowercase_ : Tuple ) -> Optional[int]:
if "\u4e00" <= char <= "\u9fff":
return True
return False
def UpperCAmelCase_ ( self : Optional[int] , lowercase_ : str ) -> Optional[Any]:
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def UpperCAmelCase_ ( self : Optional[int] , lowercase_ : Optional[int] ) -> Tuple:
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def UpperCAmelCase_ ( self : Optional[Any] , lowercase_ : List[Any] ) -> List[str]:
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(snake_case_ ) == 1:
UpperCAmelCase : Optional[int] = unicodedata.category(snake_case_ )
if cat == "Zs":
return True
return False
def UpperCAmelCase_ ( self : Tuple , lowercase_ : Optional[Any] ) -> int:
UpperCAmelCase : Optional[Any] = {}
with io.open(snake_case_ , 'r' , encoding='utf-8' ) as f:
for index, line in enumerate(snake_case_ ):
UpperCAmelCase : Tuple = line.rstrip('\n' )
UpperCAmelCase : Tuple = int(snake_case_ )
return token_to_idx
def UpperCAmelCase_ ( self : List[Any] , lowercase_ : str , lowercase_ : Optional[str] = None ) -> Tuple[str]:
UpperCAmelCase : str = 0
if os.path.isdir(snake_case_ ):
UpperCAmelCase : Optional[int] = os.path.join(
snake_case_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
else:
UpperCAmelCase : str = (filename_prefix + '-' if filename_prefix else '') + save_directory
with open(snake_case_ , 'w' , encoding='utf-8' ) as writer:
for token, token_index in sorted(self.vocab.items() , key=lambda lowercase_ : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
' Please check that the vocabulary is not corrupted!' )
UpperCAmelCase : Tuple = token_index
writer.write(token + '\n' )
index += 1
UpperCAmelCase : Optional[int] = os.path.join(snake_case_ , 'sentencepiece.bpe.model' )
with open(snake_case_ , 'wb' ) as fi:
UpperCAmelCase : Optional[int] = self.sp_model.serialized_model_proto()
fi.write(snake_case_ )
return (vocab_file,)
| 151 |
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( lowercase_ ) -> List[str]:
A__ = len(lowercase_ )
while cur > 1:
# Find the maximum number in arr
A__ = arr.index(max(arr[0:cur] ) )
# Reverse from 0 to mi
A__ = arr[mi::-1] + arr[mi + 1 : len(lowercase_ )]
# Reverse whole list
A__ = arr[cur - 1 :: -1] + arr[cur : len(lowercase_ )]
cur -= 1
return arr
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = input("Enter numbers separated by a comma:\n").strip()
SCREAMING_SNAKE_CASE = [int(item) for item in user_input.split(",")]
print(pancake_sort(unsorted))
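# Quick sanity checks (an illustrative sketch): each pass flips the current
# maximum to the front and then to its final slot, so at most 2(n-1) flips occur.
assert pancake_sort([3, 1, 2]) == [1, 2, 3]
assert pancake_sort([10, -2, 7, 7]) == [-2, 7, 7, 10]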
| 247 | 0 |
from typing import List, Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/autoformer-tourism-monthly": "https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json",
}


class AutoformerConfig(PretrainedConfig):
    model_type = "autoformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: bool = True,
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        activation_function: str = "gelu",
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache: bool = True,
        is_encoder_decoder=True,
        # Autoformer arguments
        label_length: int = 10,
        moving_average: int = 25,
        autocorrelation_factor: int = 3,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length if context_length is not None else prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality is not None and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Autoformer
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
| 352 |
"""simple docstring"""
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod() | 153 | 0 |
import argparse
import os

import torch

from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint


def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict, codebook_state_dict):
    upgrade = {}

    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue

        key = key.replace("heads.cmd.mim_head.cls.predictions", "mmm_image_head")
        key = key.replace("heads.cmd.mlm_head.cls.predictions", "mmm_text_head")
        key = key.replace("heads.cmd.itm_head.cls", "itm_head")
        key = key.replace("heads.cmd.itm_head.pooler", "itm_head.pooler")
        key = key.replace("heads.cmd.clip_head.logit_scale", "flava.logit_scale")
        key = key.replace("heads.fairseq_mlm.cls.predictions", "mlm_head")
        key = key.replace("heads.imagenet.mim_head.cls.predictions", "mim_head")
        key = key.replace("mm_text_projection", "flava.text_to_mm_projection")
        key = key.replace("mm_image_projection", "flava.image_to_mm_projection")
        key = key.replace("image_encoder.module", "flava.image_model")
        key = key.replace("text_encoder.module", "flava.text_model")
        key = key.replace("mm_encoder.module.encoder.cls_token", "flava.multimodal_model.cls_token")
        key = key.replace("mm_encoder.module", "flava.multimodal_model")
        key = key.replace("text_projection", "flava.text_projection")
        key = key.replace("image_projection", "flava.image_projection")

        upgrade[key] = value.float()

    for key, value in codebook_state_dict.items():
        upgrade[f"image_codebook.{key}"] = value

    return upgrade


@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None):
    """Copy/paste/tweak the original FLAVA weights into the transformers design."""
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path)
    else:
        config = FlavaConfig()

    hf_model = FlavaForPreTraining(config).eval()

    codebook_state_dict = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False)

    if os.path.exists(checkpoint_path):
        state_dict = torch.load(checkpoint_path, map_location="cpu")
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path, map_location="cpu")

    hf_state_dict = upgrade_state_dict(state_dict, codebook_state_dict)
    hf_model.load_state_dict(hf_state_dict)

    hf_count = count_parameters(hf_model.state_dict())
    state_dict_count = count_parameters(state_dict) + count_parameters(codebook_state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__a = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
parser.add_argument('--codebook_path', default=None, type=str, help='Path to flava codebook checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
__a = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path) | 145 | '''simple docstring'''
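# Usage sketch for the conversion script above. All paths below are
# placeholders, not taken from the source; the checkpoint may be a local
# file or a URL, mirroring the os.path.exists() branch:
#
#     convert_flava_checkpoint(
#         checkpoint_path="flava_pretrain.pt",
#         codebook_path="dalle_codebook.pt",
#         pytorch_dump_folder_path="./flava-hf",
#     )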
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union

import pandas as pd
import pyarrow as pa

import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal


logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]


@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV."""

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None

    def __post_init__(self):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names

    @property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
            "sep": self.sep,
            "header": self.header,
            "names": self.names,
            "index_col": self.index_col,
            "usecols": self.usecols,
            "prefix": self.prefix,
            "mangle_dupe_cols": self.mangle_dupe_cols,
            "engine": self.engine,
            "converters": self.converters,
            "true_values": self.true_values,
            "false_values": self.false_values,
            "skipinitialspace": self.skipinitialspace,
            "skiprows": self.skiprows,
            "nrows": self.nrows,
            "na_values": self.na_values,
            "keep_default_na": self.keep_default_na,
            "na_filter": self.na_filter,
            "verbose": self.verbose,
            "skip_blank_lines": self.skip_blank_lines,
            "thousands": self.thousands,
            "decimal": self.decimal,
            "lineterminator": self.lineterminator,
            "quotechar": self.quotechar,
            "quoting": self.quoting,
            "escapechar": self.escapechar,
            "comment": self.comment,
            "encoding": self.encoding,
            "dialect": self.dialect,
            "error_bad_lines": self.error_bad_lines,
            "warn_bad_lines": self.warn_bad_lines,
            "skipfooter": self.skipfooter,
            "doublequote": self.doublequote,
            "memory_map": self.memory_map,
            "float_precision": self.float_precision,
            "chunksize": self.chunksize,
            "encoding_errors": self.encoding_errors,
            "on_bad_lines": self.on_bad_lines,
            "date_format": self.date_format,
        }

        # some kwargs must not be passed if they don't have a default value
        # some others are deprecated and we can also not pass them if they are the default value
        for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 2.0 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 2):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 1.3 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        return pd_read_csv_kwargs


class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise | 145 | 1 |
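This builder is what backs `load_dataset("csv", ...)` in the `datasets` library; extra keyword arguments flow into `CsvConfig` and from there into `pandas.read_csv`. A typical call (file names are placeholders):

from datasets import load_dataset

dataset = load_dataset("csv", data_files={"train": "train.csv"}, sep=";", skiprows=1)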
"""simple docstring"""
from collections.abc import Generator
from math import sin
def __lowerCAmelCase (_UpperCamelCase ):
if len(_UpperCamelCase ) != 32:
raise ValueError('Input must be of length 32' )
__lowerCAmelCase : List[str] = b''
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
def __lowerCAmelCase (_UpperCamelCase ):
if i < 0:
raise ValueError('Input must be non-negative' )
__lowerCAmelCase : int = format(_UpperCamelCase , '08x' )[-8:]
__lowerCAmelCase : Tuple = b''
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('utf-8' )
return little_endian_hex
def __lowerCAmelCase (_UpperCamelCase ):
__lowerCAmelCase : Any = b''
for char in message:
bit_string += format(_UpperCamelCase , '08b' ).encode('utf-8' )
__lowerCAmelCase : List[str] = format(len(_UpperCamelCase ) , '064b' ).encode('utf-8' )
# Pad bit_string to a multiple of 512 chars
bit_string += b"1"
while len(_UpperCamelCase ) % 512 != 448:
bit_string += b"0"
bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] )
return bit_string
def __lowerCAmelCase (_UpperCamelCase ):
if len(_UpperCamelCase ) % 512 != 0:
raise ValueError('Input must have length that\'s a multiple of 512' )
for pos in range(0 , len(_UpperCamelCase ) , 512 ):
__lowerCAmelCase : Tuple = bit_string[pos : pos + 512]
__lowerCAmelCase : str = []
for i in range(0 , 512 , 32 ):
block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) )
yield block_words
def __lowerCAmelCase (_UpperCamelCase ):
if i < 0:
raise ValueError('Input must be non-negative' )
__lowerCAmelCase : List[str] = format(_UpperCamelCase , '032b' )
__lowerCAmelCase : Union[str, Any] = ''
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(_UpperCamelCase , 2 )
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ):
return (a + b) % 2**32
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ):
if i < 0:
raise ValueError('Input must be non-negative' )
if shift < 0:
raise ValueError('Shift must be non-negative' )
return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def __lowerCAmelCase (_UpperCamelCase ):
__lowerCAmelCase : Dict = preprocess(_UpperCamelCase )
__lowerCAmelCase : str = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )]
# Starting states
__lowerCAmelCase : Tuple = 0x67452301
__lowerCAmelCase : List[str] = 0xEFCDAB89
__lowerCAmelCase : List[str] = 0x98BADCFE
__lowerCAmelCase : Optional[Any] = 0x10325476
__lowerCAmelCase : Any = [
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
]
# Process bit string in chunks, each with 16 32-char words
for block_words in get_block_words(_UpperCamelCase ):
__lowerCAmelCase : Optional[int] = aa
__lowerCAmelCase : List[str] = ba
__lowerCAmelCase : Dict = ca
__lowerCAmelCase : Optional[int] = da
# Hash current chunk
for i in range(64 ):
if i <= 15:
# f = (b & c) | (not_32(b) & d) # Alternate definition for f
__lowerCAmelCase : Any = d ^ (b & (c ^ d))
__lowerCAmelCase : Optional[Any] = i
elif i <= 31:
# f = (d & b) | (not_32(d) & c) # Alternate definition for f
__lowerCAmelCase : Union[str, Any] = c ^ (d & (b ^ c))
__lowerCAmelCase : Optional[Any] = (5 * i + 1) % 16
elif i <= 47:
__lowerCAmelCase : Optional[int] = b ^ c ^ d
__lowerCAmelCase : str = (3 * i + 5) % 16
else:
__lowerCAmelCase : Optional[Any] = c ^ (b | not_aa(_UpperCamelCase ))
__lowerCAmelCase : Optional[Any] = (7 * i) % 16
__lowerCAmelCase : Dict = (f + a + added_consts[i] + block_words[g]) % 2**32
__lowerCAmelCase : Optional[Any] = d
__lowerCAmelCase : int = c
__lowerCAmelCase : Tuple = b
__lowerCAmelCase : Union[str, Any] = sum_aa(_UpperCamelCase , left_rotate_aa(_UpperCamelCase , shift_amounts[i] ) )
# Add hashed chunk to running total
__lowerCAmelCase : str = sum_aa(_UpperCamelCase , _UpperCamelCase )
__lowerCAmelCase : Optional[Any] = sum_aa(_UpperCamelCase , _UpperCamelCase )
__lowerCAmelCase : List[str] = sum_aa(_UpperCamelCase , _UpperCamelCase )
__lowerCAmelCase : str = sum_aa(_UpperCamelCase , _UpperCamelCase )
__lowerCAmelCase : Any = reformat_hex(_UpperCamelCase ) + reformat_hex(_UpperCamelCase ) + reformat_hex(_UpperCamelCase ) + reformat_hex(_UpperCamelCase )
return digest
if __name__ == "__main__":
import doctest
doctest.testmod() | 350 |
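A quick end-to-end check of the implementation above (the function name is from the fixed-up version); the expected value is the well-known MD5 digest of "hello":

assert md5_me(b"hello") == b"5d41402abc4b2a76b9719d911017c592"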
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class A__ ( unittest.TestCase):
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss'] ):
__lowerCAmelCase : Optional[Any] = model_result['result'][batch_size][sequence_length]
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = 'sshleifer/tiny-gpt2'
__lowerCAmelCase : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=_SCREAMING_SNAKE_CASE , multi_process=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Tuple = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __lowerCamelCase ( self ):
__lowerCAmelCase : int = 'sgugger/tiny-distilbert-classification'
__lowerCAmelCase : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , only_pretrain_model=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Optional[int] = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[int] = 'sshleifer/tiny-gpt2'
__lowerCAmelCase : str = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : List[str] = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Union[str, Any] = 'sshleifer/tiny-gpt2'
__lowerCAmelCase : int = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=_SCREAMING_SNAKE_CASE , multi_process=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Dict = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE , [config] )
__lowerCAmelCase : str = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[Any] = 'sshleifer/tiny-gpt2'
__lowerCAmelCase : Tuple = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Any = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE , [config] )
__lowerCAmelCase : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = 'sshleifer/tiny-gpt2'
__lowerCAmelCase : str = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : str = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[str] = 'sshleifer/tiny-gpt2'
__lowerCAmelCase : Any = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : List[Any] = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE , [config] )
__lowerCAmelCase : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[int] = 'patrickvonplaten/t5-tiny-random'
__lowerCAmelCase : int = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Dict = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE , configs=[config] )
__lowerCAmelCase : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('GPU' ) ) == 0 , 'Cannot do xla on CPU.' )
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[str] = 'sshleifer/tiny-gpt2'
__lowerCAmelCase : List[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , use_xla=_SCREAMING_SNAKE_CASE , multi_process=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Optional[int] = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[int] = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
__lowerCAmelCase : List[str] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=_SCREAMING_SNAKE_CASE , save_to_csv=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_SCREAMING_SNAKE_CASE , 'inf_time.csv' ) , inference_memory_csv_file=os.path.join(_SCREAMING_SNAKE_CASE , 'inf_mem.csv' ) , env_info_csv_file=os.path.join(_SCREAMING_SNAKE_CASE , 'env.csv' ) , multi_process=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Tuple = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE )
benchmark.run()
self.assertTrue(Path(os.path.join(_SCREAMING_SNAKE_CASE , 'inf_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(_SCREAMING_SNAKE_CASE , 'inf_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(_SCREAMING_SNAKE_CASE , 'env.csv' ) ).exists() )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Tuple = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(_SCREAMING_SNAKE_CASE ):
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'sequential' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'cumulative' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'current' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'total' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
__lowerCAmelCase : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_SCREAMING_SNAKE_CASE , 'log.txt' ) , log_print=_SCREAMING_SNAKE_CASE , trace_memory_line_by_line=_SCREAMING_SNAKE_CASE , eager_mode=_SCREAMING_SNAKE_CASE , multi_process=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Any = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(_SCREAMING_SNAKE_CASE , 'log.txt' ) ).exists() ) | 182 | 0 |
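The same arguments/benchmark pair the tests exercise can be used directly outside unittest; the model id and sizes here simply mirror the tests above:

from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments

args = TensorFlowBenchmarkArguments(
    models=["sshleifer/tiny-gpt2"], training=False, inference=True,
    sequence_lengths=[8], batch_sizes=[1], multi_process=False,
)
results = TensorFlowBenchmark(args).run()
print(results.time_inference_result)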
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


_import_structure = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_encoder_decoder"] = ["SpeechEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_speech_encoder_decoder"] = ["FlaxSpeechEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
 | 204 |
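What the lazy-module indirection above buys: importing the package stays cheap, and the torch- or flax-backed classes only load when first accessed. A minimal illustration using only the names the module exports:

from transformers.models.speech_encoder_decoder import SpeechEncoderDecoderConfig  # resolved lazily via _LazyModule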
def knapsack(weights: list, values: list, number_of_items: int, max_weight: int, index: int) -> int:
    """
    Recursive 0/1 knapsack: at each index either skip the item or, if it
    fits, take it and recurse on the remaining capacity.
    """
    if index == number_of_items:
        return 0
    ans1 = 0
    ans2 = 0
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans1, ans2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
 | 222 | 0 |
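A worked instance for the recursive solver above: with capacity 5, taking the items of weight 1 and 4 gives the best value, 5 + 8 = 13.

assert knapsack([1, 2, 4, 5], [5, 4, 8, 6], number_of_items=4, max_weight=5, index=0) == 13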
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


if TYPE_CHECKING:
    from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType


logger = logging.get_logger(__name__)

DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json",
    "microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json",
    "microsoft/deberta-v2-xlarge-mnli": (
        "https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"
    ),
    "microsoft/deberta-v2-xxlarge-mnli": (
        "https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"
    ),
}


class DebertaV2Config(PretrainedConfig):
    model_type = "deberta-v2"

    def __init__(
        self,
        vocab_size=128_100,
        hidden_size=1_536,
        num_hidden_layers=24,
        num_attention_heads=24,
        intermediate_size=6_144,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=0,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act="gelu",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input

        # Backwards compatibility
        if type(pos_att_type) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]

        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps

        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act


class DebertaV2OnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
            )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
        tokenizer: "PreTrainedTokenizerBase" = None,
    ) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
 | 351 |
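The backwards-compatibility branch above also accepts a "|"-separated string for pos_att_type; it is lower-cased, split, and stripped:

config = DebertaV2Config(pos_att_type="c2p|p2c")
assert config.pos_att_type == ["c2p", "p2c"]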
import os
import tempfile
import unittest

from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_PRETRAINING_MAPPING,
        NezhaForMaskedLM,
        NezhaForMultipleChoice,
        NezhaForNextSentencePrediction,
        NezhaForPreTraining,
        NezhaForQuestionAnswering,
        NezhaForSequenceClassification,
        NezhaForTokenClassification,
        NezhaModel,
    )
    from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST


class NezhaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=128,
        max_relative_position=32,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return NezhaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = NezhaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NezhaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NezhaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = NezhaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class NezhaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            NezhaModel,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NezhaModel,
            "fill-mask": NezhaForMaskedLM,
            "question-answering": NezhaForQuestionAnswering,
            "text-classification": NezhaForSequenceClassification,
            "token-classification": NezhaForTokenClassification,
            "zero-shot": NezhaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = NezhaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NezhaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NezhaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "bert.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "bert.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))


@require_torch
class NezhaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_nezha_model(self):
        model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor([[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))

    @slow
    def test_inference_nezha_masked_lm(self):
        model = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 21_128))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
 | 288 | 0 |
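A note on exercising the suite above: in the transformers test layout (the file path here is assumed), the @slow-marked tests are skipped unless RUN_SLOW is set in the environment, e.g.

# RUN_SLOW=1 python -m pytest tests/models/nezha/test_modeling_nezha.py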