code stringlengths 82 53.2k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ = logging.get_logger(__name__)
def a ( A__ : Dict ) -> List[Any]:
"""simple docstring"""
_lowercase =MobileViTConfig()
# size of the architecture
if "mobilevit_s" in mobilevit_name:
_lowercase =[144, 192, 240]
_lowercase =[16, 32, 64, 96, 128, 160, 640]
elif "mobilevit_xs" in mobilevit_name:
_lowercase =[96, 120, 144]
_lowercase =[16, 32, 48, 64, 80, 96, 384]
elif "mobilevit_xxs" in mobilevit_name:
_lowercase =[64, 80, 96]
_lowercase =[16, 16, 24, 48, 64, 80, 320]
_lowercase =0.05
_lowercase =2.0
if mobilevit_name.startswith('deeplabv3_' ):
_lowercase =512
_lowercase =16
_lowercase =21
_lowercase ='pascal-voc-id2label.json'
else:
_lowercase =1000
_lowercase ='imagenet-1k-id2label.json'
_lowercase ='huggingface/label-files'
_lowercase =json.load(open(hf_hub_download(A__ , A__ , repo_type='dataset' ) , 'r' ) )
_lowercase ={int(A__ ): v for k, v in idalabel.items()}
_lowercase =idalabel
_lowercase ={v: k for k, v in idalabel.items()}
return config
def a ( A__ : Tuple , A__ : List[Any]=False ) -> List[str]:
"""simple docstring"""
for i in range(1 , 6 ):
if F'''layer_{i}.''' in name:
_lowercase =name.replace(F'''layer_{i}.''' , F'''encoder.layer.{i - 1}.''' )
if "conv_1." in name:
_lowercase =name.replace('conv_1.' , 'conv_stem.' )
if ".block." in name:
_lowercase =name.replace('.block.' , '.' )
if "exp_1x1" in name:
_lowercase =name.replace('exp_1x1' , 'expand_1x1' )
if "red_1x1" in name:
_lowercase =name.replace('red_1x1' , 'reduce_1x1' )
if ".local_rep.conv_3x3." in name:
_lowercase =name.replace('.local_rep.conv_3x3.' , '.conv_kxk.' )
if ".local_rep.conv_1x1." in name:
_lowercase =name.replace('.local_rep.conv_1x1.' , '.conv_1x1.' )
if ".norm." in name:
_lowercase =name.replace('.norm.' , '.normalization.' )
if ".conv." in name:
_lowercase =name.replace('.conv.' , '.convolution.' )
if ".conv_proj." in name:
_lowercase =name.replace('.conv_proj.' , '.conv_projection.' )
for i in range(0 , 2 ):
for j in range(0 , 4 ):
if F'''.{i}.{j}.''' in name:
_lowercase =name.replace(F'''.{i}.{j}.''' , F'''.{i}.layer.{j}.''' )
for i in range(2 , 6 ):
for j in range(0 , 4 ):
if F'''.{i}.{j}.''' in name:
_lowercase =name.replace(F'''.{i}.{j}.''' , F'''.{i}.''' )
if "expand_1x1" in name:
_lowercase =name.replace('expand_1x1' , 'downsampling_layer.expand_1x1' )
if "conv_3x3" in name:
_lowercase =name.replace('conv_3x3' , 'downsampling_layer.conv_3x3' )
if "reduce_1x1" in name:
_lowercase =name.replace('reduce_1x1' , 'downsampling_layer.reduce_1x1' )
for i in range(2 , 5 ):
if F'''.global_rep.{i}.weight''' in name:
_lowercase =name.replace(F'''.global_rep.{i}.weight''' , '.layernorm.weight' )
if F'''.global_rep.{i}.bias''' in name:
_lowercase =name.replace(F'''.global_rep.{i}.bias''' , '.layernorm.bias' )
if ".global_rep." in name:
_lowercase =name.replace('.global_rep.' , '.transformer.' )
if ".pre_norm_mha.0." in name:
_lowercase =name.replace('.pre_norm_mha.0.' , '.layernorm_before.' )
if ".pre_norm_mha.1.out_proj." in name:
_lowercase =name.replace('.pre_norm_mha.1.out_proj.' , '.attention.output.dense.' )
if ".pre_norm_ffn.0." in name:
_lowercase =name.replace('.pre_norm_ffn.0.' , '.layernorm_after.' )
if ".pre_norm_ffn.1." in name:
_lowercase =name.replace('.pre_norm_ffn.1.' , '.intermediate.dense.' )
if ".pre_norm_ffn.4." in name:
_lowercase =name.replace('.pre_norm_ffn.4.' , '.output.dense.' )
if ".transformer." in name:
_lowercase =name.replace('.transformer.' , '.transformer.layer.' )
if ".aspp_layer." in name:
_lowercase =name.replace('.aspp_layer.' , '.' )
if ".aspp_pool." in name:
_lowercase =name.replace('.aspp_pool.' , '.' )
if "seg_head." in name:
_lowercase =name.replace('seg_head.' , 'segmentation_head.' )
if "segmentation_head.classifier.classifier." in name:
_lowercase =name.replace('segmentation_head.classifier.classifier.' , 'segmentation_head.classifier.' )
if "classifier.fc." in name:
_lowercase =name.replace('classifier.fc.' , 'classifier.' )
elif (not base_model) and ("segmentation_head." not in name):
_lowercase ='mobilevit.' + name
return name
def a ( A__ : int , A__ : List[str] , A__ : List[Any]=False ) -> List[str]:
"""simple docstring"""
if base_model:
_lowercase =''
else:
_lowercase ='mobilevit.'
for key in orig_state_dict.copy().keys():
_lowercase =orig_state_dict.pop(A__ )
if key[:8] == "encoder.":
_lowercase =key[8:]
if "qkv" in key:
_lowercase =key.split('.' )
_lowercase =int(key_split[0][6:] ) - 1
_lowercase =int(key_split[3] )
_lowercase =model.get_submodule(F'''{model_prefix}encoder.layer.{layer_num}''' )
_lowercase =layer.transformer.layer[transformer_num].attention.attention.all_head_size
_lowercase =(
F'''{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention.'''
)
if "weight" in key:
_lowercase =val[:dim, :]
_lowercase =val[dim : dim * 2, :]
_lowercase =val[-dim:, :]
else:
_lowercase =val[:dim]
_lowercase =val[dim : dim * 2]
_lowercase =val[-dim:]
else:
_lowercase =val
return orig_state_dict
def a ( ) -> Dict:
"""simple docstring"""
_lowercase ='http://images.cocodataset.org/val2017/000000039769.jpg'
_lowercase =Image.open(requests.get(A__ , stream=A__ ).raw )
return im
@torch.no_grad()
def a ( A__ : Optional[int] , A__ : Any , A__ : Any , A__ : List[str]=False ) -> Optional[int]:
"""simple docstring"""
_lowercase =get_mobilevit_config(A__ )
# load original state_dict
_lowercase =torch.load(A__ , map_location='cpu' )
# load 🤗 model
if mobilevit_name.startswith('deeplabv3_' ):
_lowercase =MobileViTForSemanticSegmentation(A__ ).eval()
else:
_lowercase =MobileViTForImageClassification(A__ ).eval()
_lowercase =convert_state_dict(A__ , A__ )
model.load_state_dict(A__ )
# Check outputs on an image, prepared by MobileViTImageProcessor
_lowercase =MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
_lowercase =image_processor(images=prepare_img() , return_tensors='pt' )
_lowercase =model(**A__ )
_lowercase =outputs.logits
if mobilevit_name.startswith('deeplabv3_' ):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
_lowercase =torch.tensor(
[
[[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
[[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
[[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
_lowercase =torch.tensor(
[
[[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
[[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
[[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
_lowercase =torch.tensor(
[
[[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
[[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
[[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
] )
else:
raise ValueError(F'''Unknown mobilevit_name: {mobilevit_name}''' )
assert torch.allclose(logits[0, :3, :3, :3] , A__ , atol=1e-4 )
else:
assert logits.shape == (1, 1000)
if mobilevit_name == "mobilevit_s":
_lowercase =torch.tensor([-0.9866, 0.2392, -1.1241] )
elif mobilevit_name == "mobilevit_xs":
_lowercase =torch.tensor([-2.4761, -0.9399, -1.9587] )
elif mobilevit_name == "mobilevit_xxs":
_lowercase =torch.tensor([-1.9364, -1.2327, -0.4653] )
else:
raise ValueError(F'''Unknown mobilevit_name: {mobilevit_name}''' )
assert torch.allclose(logits[0, :3] , A__ , atol=1e-4 )
Path(A__ ).mkdir(exist_ok=A__ )
print(F'''Saving model {mobilevit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(A__ )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(A__ )
if push_to_hub:
_lowercase ={
'mobilevit_s': 'mobilevit-small',
'mobilevit_xs': 'mobilevit-x-small',
'mobilevit_xxs': 'mobilevit-xx-small',
'deeplabv3_mobilevit_s': 'deeplabv3-mobilevit-small',
'deeplabv3_mobilevit_xs': 'deeplabv3-mobilevit-x-small',
'deeplabv3_mobilevit_xxs': 'deeplabv3-mobilevit-xx-small',
}
print('Pushing to the hub...' )
_lowercase =model_mapping[mobilevit_name]
image_processor.push_to_hub(A__ , organization='apple' )
model.push_to_hub(A__ , organization='apple' )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--mobilevit_name',
default='mobilevit_s',
type=str,
help=(
'Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','
' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'
),
)
parser.add_argument(
'--checkpoint_path', required=True, type=str, help='Path to the original state dict (.pt file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
lowercase_ = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 291 |
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def UpperCamelCase ( __magic_name__ : Dict , __magic_name__ : List[str]=7 ) -> Dict:
"""simple docstring"""
lowercase__ = None
if token is not None:
lowercase__ = {"""Accept""": """application/vnd.github+json""", """Authorization""": f'''Bearer {token}'''}
# The id of a workflow (not of a workflow run)
lowercase__ = """636036"""
lowercase__ = f'''https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs'''
# On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
url += f'''?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}'''
lowercase__ = requests.get(__magic_name__ , headers=__magic_name__ ).json()
return result["workflow_runs"]
def UpperCamelCase ( __magic_name__ : str ) -> Dict:
"""simple docstring"""
lowercase__ = get_daily_ci_runs(__magic_name__ )
lowercase__ = None
for workflow_run in workflow_runs:
if workflow_run["status"] == "completed":
lowercase__ = workflow_run["""id"""]
break
return workflow_run_id
def UpperCamelCase ( __magic_name__ : str , __magic_name__ : List[Any] , __magic_name__ : Dict ) -> str:
"""simple docstring"""
lowercase__ = get_last_daily_ci_runs(__magic_name__ )
if workflow_run_id is not None:
lowercase__ = get_artifacts_links(worflow_run_id=__magic_name__ , token=__magic_name__ )
for artifact_name in artifact_names:
if artifact_name in artifacts_links:
lowercase__ = artifacts_links[artifact_name]
download_artifact(
artifact_name=__magic_name__ , artifact_url=__magic_name__ , output_dir=__magic_name__ , token=__magic_name__ )
def UpperCamelCase ( __magic_name__ : List[str] , __magic_name__ : str , __magic_name__ : Union[str, Any] ) -> Tuple:
"""simple docstring"""
get_last_daily_ci_artifacts(__magic_name__ , __magic_name__ , __magic_name__ )
lowercase__ = {}
for artifact_name in artifact_names:
lowercase__ = os.path.join(__magic_name__ , f'''{artifact_name}.zip''' )
if os.path.isfile(__magic_name__ ):
lowercase__ = {}
with zipfile.ZipFile(__magic_name__ ) as z:
for filename in z.namelist():
if not os.path.isdir(__magic_name__ ):
# read the file
with z.open(__magic_name__ ) as f:
lowercase__ = f.read().decode("""UTF-8""" )
return results
| 15 | 0 |
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuida
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
lowercase : int = get_logger(__name__)
lowercase : List[str] = Path(__file__).parent / """model_card_template.md"""
lowercase : str = uuida().hex
lowercase : Optional[int] = os.getenv("""HF_HUB_OFFLINE""", """""").upper() in ENV_VARS_TRUE_VALUES
lowercase : int = os.getenv("""DISABLE_TELEMETRY""", """""").upper() in ENV_VARS_TRUE_VALUES
lowercase : Tuple = HUGGINGFACE_CO_RESOLVE_ENDPOINT + """/api/telemetry/"""
def UpperCAmelCase_ ( _UpperCAmelCase = None ):
lowerCamelCase_: Optional[Any] = f"""diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"""
if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
return ua + "; telemetry/off"
if is_torch_available():
ua += f"""; torch/{_torch_version}"""
if is_flax_available():
ua += f"""; jax/{_jax_version}"""
ua += f"""; flax/{_flax_version}"""
if is_onnx_available():
ua += f"""; onnxruntime/{_onnxruntime_version}"""
# CI will set this value to True
if os.environ.get("""DIFFUSERS_IS_CI""" , """""" ).upper() in ENV_VARS_TRUE_VALUES:
ua += "; is_ci/true"
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
ua += "; " + "; ".join(f"""{k}/{v}""" for k, v in user_agent.items() )
elif isinstance(_UpperCAmelCase , _UpperCAmelCase ):
ua += "; " + user_agent
return ua
def UpperCAmelCase_ ( _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None ):
if token is None:
lowerCamelCase_: List[str] = HfFolder.get_token()
if organization is None:
lowerCamelCase_: str = whoami(_UpperCAmelCase )["""name"""]
return f"""{username}/{model_id}"""
else:
return f"""{organization}/{model_id}"""
def UpperCAmelCase_ ( _UpperCAmelCase , _UpperCAmelCase ):
if not is_jinja_available():
raise ValueError(
"""Modelcard rendering is based on Jinja templates."""
""" Please make sure to have `jinja` installed before using `create_model_card`."""
""" To install it, please run `pip install Jinja2`.""" )
if hasattr(_UpperCAmelCase , """local_rank""" ) and args.local_rank not in [-1, 0]:
return
lowerCamelCase_: Union[str, Any] = args.hub_token if hasattr(_UpperCAmelCase , """hub_token""" ) else None
lowerCamelCase_: Any = get_full_repo_name(_UpperCAmelCase , token=_UpperCAmelCase )
lowerCamelCase_: Tuple = ModelCard.from_template(
card_data=ModelCardData( # Card metadata object that will be converted to YAML block
language="""en""" , license="""apache-2.0""" , library_name="""diffusers""" , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=_UpperCAmelCase , model_name=_UpperCAmelCase , repo_name=_UpperCAmelCase , dataset_name=args.dataset_name if hasattr(_UpperCAmelCase , """dataset_name""" ) else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=(
args.gradient_accumulation_steps if hasattr(_UpperCAmelCase , """gradient_accumulation_steps""" ) else None
) , adam_betaa=args.adam_betaa if hasattr(_UpperCAmelCase , """adam_beta1""" ) else None , adam_betaa=args.adam_betaa if hasattr(_UpperCAmelCase , """adam_beta2""" ) else None , adam_weight_decay=args.adam_weight_decay if hasattr(_UpperCAmelCase , """adam_weight_decay""" ) else None , adam_epsilon=args.adam_epsilon if hasattr(_UpperCAmelCase , """adam_epsilon""" ) else None , lr_scheduler=args.lr_scheduler if hasattr(_UpperCAmelCase , """lr_scheduler""" ) else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(_UpperCAmelCase , """lr_warmup_steps""" ) else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(_UpperCAmelCase , """ema_inv_gamma""" ) else None , ema_power=args.ema_power if hasattr(_UpperCAmelCase , """ema_power""" ) else None , ema_max_decay=args.ema_max_decay if hasattr(_UpperCAmelCase , """ema_max_decay""" ) else None , mixed_precision=args.mixed_precision , )
lowerCamelCase_: Tuple = os.path.join(args.output_dir , """README.md""" )
model_card.save(_UpperCAmelCase )
def UpperCAmelCase_ ( _UpperCAmelCase , _UpperCAmelCase = None ):
if resolved_file is None or commit_hash is not None:
return commit_hash
lowerCamelCase_: Any = str(Path(_UpperCAmelCase ).as_posix() )
lowerCamelCase_: str = re.search(r"""snapshots/([^/]+)/""" , _UpperCAmelCase )
if search is None:
return None
lowerCamelCase_: Optional[int] = search.groups()[0]
return commit_hash if REGEX_COMMIT_HASH.match(_UpperCAmelCase ) else None
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
lowercase : List[str] = os.path.expanduser(
os.getenv("""HF_HOME""", os.path.join(os.getenv("""XDG_CACHE_HOME""", """~/.cache"""), """huggingface"""))
)
lowercase : int = os.path.join(hf_cache_home, """diffusers""")
def UpperCAmelCase_ ( _UpperCAmelCase = None , _UpperCAmelCase = None ):
if new_cache_dir is None:
lowerCamelCase_: Optional[int] = DIFFUSERS_CACHE
if old_cache_dir is None:
lowerCamelCase_: List[str] = old_diffusers_cache
lowerCamelCase_: Union[str, Any] = Path(_UpperCAmelCase ).expanduser()
lowerCamelCase_: Union[str, Any] = Path(_UpperCAmelCase ).expanduser()
for old_blob_path in old_cache_dir.glob("""**/blobs/*""" ):
if old_blob_path.is_file() and not old_blob_path.is_symlink():
lowerCamelCase_: str = new_cache_dir / old_blob_path.relative_to(_UpperCAmelCase )
new_blob_path.parent.mkdir(parents=_UpperCAmelCase , exist_ok=_UpperCAmelCase )
os.replace(_UpperCAmelCase , _UpperCAmelCase )
try:
os.symlink(_UpperCAmelCase , _UpperCAmelCase )
except OSError:
logger.warning(
"""Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.""" )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
lowercase : Optional[int] = os.path.join(DIFFUSERS_CACHE, """version_diffusers_cache.txt""")
if not os.path.isfile(cache_version_file):
lowercase : Optional[Any] = 0
else:
with open(cache_version_file) as f:
try:
lowercase : Union[str, Any] = int(f.read())
except ValueError:
lowercase : Dict = 0
if cache_version < 1:
lowercase : Union[str, Any] = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
"""The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your """
"""existing cached models. This is a one-time operation, you can interrupt it or run it """
"""later by calling `diffusers.utils.hub_utils.move_cache()`."""
)
try:
move_cache()
except Exception as e:
lowercase : Dict = """\n""".join(traceback.format_tb(e.__traceback__))
logger.error(
F"There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease "
"""file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole """
"""message and we will do our best to help."""
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, """w""") as f:
f.write("""1""")
except Exception:
logger.warning(
F"There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure "
"""the directory exists and can be written to."""
)
def UpperCAmelCase_ ( _UpperCAmelCase , _UpperCAmelCase = None ):
if variant is not None:
lowerCamelCase_: Optional[Any] = weights_name.split(""".""" )
lowerCamelCase_: Tuple = splits[:-1] + [variant] + splits[-1:]
lowerCamelCase_: Dict = """.""".join(_UpperCAmelCase )
return weights_name
def UpperCAmelCase_ ( _UpperCAmelCase , *,
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None , ):
lowerCamelCase_: Optional[Any] = str(_UpperCAmelCase )
if os.path.isfile(_UpperCAmelCase ):
return pretrained_model_name_or_path
elif os.path.isdir(_UpperCAmelCase ):
if os.path.isfile(os.path.join(_UpperCAmelCase , _UpperCAmelCase ) ):
# Load from a PyTorch checkpoint
lowerCamelCase_: List[str] = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
return model_file
elif subfolder is not None and os.path.isfile(
os.path.join(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) ):
lowerCamelCase_: Tuple = os.path.join(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
return model_file
else:
raise EnvironmentError(
f"""Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.""" )
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
and version.parse(version.parse(_UpperCAmelCase ).base_version ) >= version.parse("""0.20.0""" )
):
try:
lowerCamelCase_: Any = hf_hub_download(
_UpperCAmelCase , filename=_add_variant(_UpperCAmelCase , _UpperCAmelCase ) , cache_dir=_UpperCAmelCase , force_download=_UpperCAmelCase , proxies=_UpperCAmelCase , resume_download=_UpperCAmelCase , local_files_only=_UpperCAmelCase , use_auth_token=_UpperCAmelCase , user_agent=_UpperCAmelCase , subfolder=_UpperCAmelCase , revision=revision or commit_hash , )
warnings.warn(
f"""Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.""" , _UpperCAmelCase , )
return model_file
except: # noqa: E722
warnings.warn(
f"""You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(_UpperCAmelCase , _UpperCAmelCase )} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(_UpperCAmelCase , _UpperCAmelCase )}' so that the correct variant file can be added.""" , _UpperCAmelCase , )
try:
# 2. Load model file as usual
lowerCamelCase_: int = hf_hub_download(
_UpperCAmelCase , filename=_UpperCAmelCase , cache_dir=_UpperCAmelCase , force_download=_UpperCAmelCase , proxies=_UpperCAmelCase , resume_download=_UpperCAmelCase , local_files_only=_UpperCAmelCase , use_auth_token=_UpperCAmelCase , user_agent=_UpperCAmelCase , subfolder=_UpperCAmelCase , revision=revision or commit_hash , )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
f"""{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier """
"""listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a """
"""token having permission to this repo with `use_auth_token` or log in with `huggingface-cli """
"""login`.""" )
except RevisionNotFoundError:
raise EnvironmentError(
f"""{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for """
"""this model name. Check the model page at """
f"""'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions.""" )
except EntryNotFoundError:
raise EnvironmentError(
f"""{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.""" )
except HTTPError as err:
raise EnvironmentError(
f"""There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}""" )
except ValueError:
raise EnvironmentError(
f"""We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"""
f""" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"""
f""" directory containing a file named {weights_name} or"""
""" \nCheckout your internet connection or see how to run the library in"""
""" offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'.""" )
except EnvironmentError:
raise EnvironmentError(
f"""Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from """
"""'https://huggingface.co/models', make sure you don't have a local directory with the same name. """
f"""Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory """
f"""containing a file named {weights_name}""" )
| 721 | from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class a__ ( __SCREAMING_SNAKE_CASE ):
_A = DistilBertTokenizer
_A = DistilBertTokenizerFast
_A = True
@slow
def lowerCAmelCase ( self : int ) -> Dict:
"""simple docstring"""
lowerCamelCase_: Any = DistilBertTokenizer.from_pretrained("""distilbert-base-uncased""" )
lowerCamelCase_: str = tokenizer.encode("""sequence builders""" , add_special_tokens=A_ )
lowerCamelCase_: List[str] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=A_ )
lowerCamelCase_: int = tokenizer.build_inputs_with_special_tokens(A_ )
lowerCamelCase_: Union[str, Any] = tokenizer.build_inputs_with_special_tokens(A_ , A_ )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
| 584 | 0 |
"""simple docstring"""
def lowercase__ ( lowercase_ ,lowercase_ ) -> float:
"""simple docstring"""
return price * (1 + tax_rate)
if __name__ == "__main__":
print(f"""{price_plus_tax(100, 0.2_5) = }""")
print(f"""{price_plus_tax(1_2_5.5_0, 0.0_5) = }""")
| 624 |
"""simple docstring"""
def lowercase__ ( lowercase_ ) -> list:
"""simple docstring"""
if len(lowercase_ ) <= 1:
return [tuple(lowercase_ )]
_UpperCamelCase : Optional[Any] = []
def generate(lowercase_ ,lowercase_ ):
if k == 1:
res.append(tuple(arr[:] ) )
return
generate(k - 1 ,lowercase_ )
for i in range(k - 1 ):
if k % 2 == 0: # k is even
_UpperCamelCase, _UpperCamelCase : List[str] = arr[k - 1], arr[i]
else: # k is odd
_UpperCamelCase, _UpperCamelCase : int = arr[k - 1], arr[0]
generate(k - 1 ,lowercase_ )
generate(len(lowercase_ ) ,lowercase_ )
return res
if __name__ == "__main__":
lowerCamelCase__ = input("Enter numbers separated by a comma:\n").strip()
lowerCamelCase__ = [int(item) for item in user_input.split(",")]
print(heaps(arr))
| 624 | 1 |
from ... import PretrainedConfig
UpperCamelCase__ : List[str] = {
'''sijunhe/nezha-cn-base''': '''https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json''',
}
class lowerCAmelCase_ ( lowerCamelCase_ ):
__a : Dict = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
__a : Tuple = "nezha"
def __init__( self ,snake_case__=21128 ,snake_case__=768 ,snake_case__=12 ,snake_case__=12 ,snake_case__=3072 ,snake_case__="gelu" ,snake_case__=0.1 ,snake_case__=0.1 ,snake_case__=512 ,snake_case__=64 ,snake_case__=2 ,snake_case__=0.02 ,snake_case__=1E-12 ,snake_case__=0.1 ,snake_case__=0 ,snake_case__=2 ,snake_case__=3 ,snake_case__=True ,**snake_case__ ,):
super().__init__(pad_token_id=snake_case__ ,bos_token_id=snake_case__ ,eos_token_id=snake_case__ ,**snake_case__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = vocab_size
SCREAMING_SNAKE_CASE_ : Dict = hidden_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE_ : int = num_attention_heads
SCREAMING_SNAKE_CASE_ : str = hidden_act
SCREAMING_SNAKE_CASE_ : Optional[Any] = intermediate_size
SCREAMING_SNAKE_CASE_ : Tuple = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : Optional[int] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : Union[str, Any] = max_position_embeddings
SCREAMING_SNAKE_CASE_ : int = max_relative_position
SCREAMING_SNAKE_CASE_ : Any = type_vocab_size
SCREAMING_SNAKE_CASE_ : List[str] = initializer_range
SCREAMING_SNAKE_CASE_ : int = layer_norm_eps
SCREAMING_SNAKE_CASE_ : Tuple = classifier_dropout
SCREAMING_SNAKE_CASE_ : Union[str, Any] = use_cache
| 685 |
def __UpperCAmelCase ( lowerCamelCase_ : int ) -> bool:
"""simple docstring"""
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
raise ValueError('check_bouncy() accepts only integer arguments' )
SCREAMING_SNAKE_CASE_ : Optional[int] = str(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : str = ''.join(sorted(lowerCamelCase_ ) )
return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
def __UpperCAmelCase ( lowerCamelCase_ : float = 99 ) -> int:
"""simple docstring"""
if not 0 < percent < 1_00:
raise ValueError('solution() only accepts values from 0 to 100' )
SCREAMING_SNAKE_CASE_ : Optional[Any] = 0
SCREAMING_SNAKE_CASE_ : Dict = 1
while True:
if check_bouncy(lowerCamelCase_ ):
bouncy_num += 1
if (bouncy_num / num) * 1_00 >= percent:
return num
num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F"""{solution(99)}""")
| 685 | 1 |
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def lowerCAmelCase_ ( _lowercase : str = "isbn/0140328726") -> dict:
"""simple docstring"""
a__ : Optional[Any] = olid.strip().strip("""/""") # Remove leading/trailing whitespace & slashes
if new_olid.count("""/""") != 1:
a__ : Optional[int] = F'''{olid} is not a valid Open Library olid'''
raise ValueError(_lowercase)
return requests.get(F'''https://openlibrary.org/{new_olid}.json''').json()
def lowerCAmelCase_ ( _lowercase : dict) -> dict:
"""simple docstring"""
a__ : Tuple = {
"""title""": """Title""",
"""publish_date""": """Publish date""",
"""authors""": """Authors""",
"""number_of_pages""": """Number of pages:""",
"""first_sentence""": """First sentence""",
"""isbn_10""": """ISBN (10)""",
"""isbn_13""": """ISBN (13)""",
}
a__ : Any = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
a__ : Any = [
get_openlibrary_data(author["""key"""])["""name"""] for author in data["""Authors"""]
]
a__ : Any = data["""First sentence"""]["""value"""]
for key, value in data.items():
if isinstance(_lowercase , _lowercase):
a__ : Any = """, """.join(_lowercase)
return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
_lowercase : Any =input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(f'Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.')
continue
print(f'\nSearching Open Library for ISBN: {isbn}...\n')
try:
_lowercase : Union[str, Any] =summarize_book(get_openlibrary_data(f'isbn/{isbn}'))
print("\n".join(f'{key}: {value}' for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(f'Sorry, there are no results for ISBN: {isbn}.')
| 136 |
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_lowercase : Optional[int] =16
_lowercase : List[str] =32
def lowerCAmelCase_(accelerator: Accelerator, batch_size: int = 16):
    """Build the GLUE/MRPC train and eval dataloaders, tokenized for bert-base-cased.

    Args:
        accelerator: The ``Accelerator`` driving the run (used for its
            distributed context and the ``main_process_first`` barrier).
        batch_size: Per-device batch size for both dataloaders.

    Returns:
        ``(train_dataloader, eval_dataloader)``.
    """
    # BUG FIX: the original signature gave both parameters the same name
    # (`_lowercase`), a SyntaxError, and bound every local to a throwaway
    # mangled name; names restored from the body's own references.
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of
    # the dataset, starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels', which is the expected name
    # for labels by the models of the transformers library.
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16.
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"],
        shuffle=True,
        collate_fn=collate_fn,
        batch_size=batch_size,
        drop_last=True,
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"],
        shuffle=False,
        collate_fn=collate_fn,
        batch_size=batch_size,
        drop_last=(accelerator.mixed_precision == "fp8"),
    )
    return train_dataloader, eval_dataloader
def lowerCAmelCase_(config, args):
    """Train and evaluate bert-base-cased on GLUE MRPC with accelerate.

    Args:
        config: Hyper-parameter dict with keys ``lr``, ``num_epochs``,
            ``seed`` and ``batch_size``.
        args: Parsed CLI namespace providing ``cpu`` and ``mixed_precision``.
    """
    # BUG FIX: every local in the original was bound to a throwaway mangled
    # name while later lines read the real names; names restored throughout.
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also controls new weight initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation, otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything.  There is no specific order to remember; we just need
    # to unpack the objects in the same order we gave them to the prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def lowerCAmelCase_():
    """Parse CLI arguments and launch :func:`training_function`."""
    # BUG FIX: the original bound the parser/args/config to throwaway mangled
    # names and passed `type=_lowercase` / `default=_lowercase` (undefined).
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    # NOTE(review): `training_function` is the conventional name of the trainer
    # defined above (mangled to `lowerCAmelCase_` in this file) — confirm.
    training_function(config, args)


# Alias so the mangled `main()` call in the entry-point guard resolves.
main = lowerCAmelCase_

if __name__ == "__main__":
    main()
| 136 | 1 |
'''simple docstring'''
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class UpperCAmelCase__:
    """Builds tiny Bit configs and dummy inputs for the model tests below
    (mirrors the standard ``ModelTester`` pattern of the transformers suite).

    BUG FIX: the original ``__init__`` declared every parameter as ``A__``
    (a SyntaxError) and every method was named ``snake_case_`` (each definition
    overwriting the previous).  Parameter names are restored from the
    assignment order; method names are restored to the ones the sibling test
    class actually calls (``prepare_config_and_inputs`` etc.).
    """

    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[8, 16, 32, 64],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        num_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups

    def prepare_config_and_inputs(self):
        """Return ``(config, pixel_values, labels)`` built from random tensors."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        """Build a tiny BitConfig from the tester's hyper-parameters."""
        return BitConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_groups=self.num_groups,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Forward the base model and check the hidden-state shape."""
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """Forward the classification head and check the logits shape."""
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        """Forward the backbone and check feature maps/channels, with and without out_features."""
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        """Return ``(config, inputs_dict)`` in the shape the common mixin expects."""
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UpperCAmelCase__(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model/pipeline tests for Bit.

    BUG FIX: the original declared its bases as the undefined name
    ``__UpperCAmelCase``, named every class attribute and method
    ``snake_case_`` (each definition shadowing the previous, so unittest could
    discover none of them), and tested ``isinstance`` against the non-existent
    ``nn.BatchNormad``.  Names are restored to the conventional ones the
    mixins and the unittest runner rely on.
    """

    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        # NOTE(review): `BitModelTester` is the conventional name of the tester
        # class above (mangled to `UpperCAmelCase__` in this file) — confirm.
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        # Bit has no extra common config properties to check.
        return

    @unittest.skip(reason="Bit does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Bit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Bit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                # BUG FIX: `nn.BatchNormad` does not exist; `nn.BatchNorm2d` was meant.
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["preactivation", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip(reason="Bit does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def lowercase():
    """Load the COCO cats test image used by the integration tests below."""
    # BUG FIX: the original assigned the opened image to a throwaway mangled
    # name and then returned the undefined name `image`.
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class UpperCAmelCase__(unittest.TestCase):
    """Slow integration test: run the pretrained Bit classifier on a real image.

    BUG FIX: the original named both members ``snake_case_`` (so unittest could
    not discover the test) and referenced locals through undefined mangled
    names; conventional names are restored.
    """

    @cached_property
    def default_image_processor(self):
        # Only build the processor when vision deps are installed.
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        # NOTE(review): `prepare_img` is the conventional name of the image
        # loader above (mangled to `lowercase` in this file) — confirm.
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@require_torch
class UpperCAmelCase__ ( __UpperCAmelCase , unittest.TestCase ):
snake_case_ = (BitBackbone,) if is_torch_available() else ()
snake_case_ = BitConfig
snake_case_ = False
def snake_case_ ( self ):
"""simple docstring"""
UpperCAmelCase_: str = BitModelTester(self ) | 711 |
class UpperCAmelCase__:
    """Maximum-sum contiguous subarray (Kadane's algorithm) over a
    comma-separated string of integers."""

    def __init__(self, A__):
        """Parse the comma-separated input string into a list of number strings.

        Args:
            A__: A string such as ``"1,-2,3"``.
        """
        # BUG FIX: the original stored the split result in a throwaway mangled
        # name (and referenced the undefined `arr`); `solve_sub_array` below
        # reads `self.array`.
        self.array = A__.split(",")

    def solve_sub_array(self):
        """Return the maximum sum over all contiguous subarrays."""
        # sum_value[i]: best sum of a subarray ending exactly at index i.
        # rear[i]: best sum over any subarray contained in [0, i].
        sum_value = [int(self.array[0])] * len(self.array)
        rear = [int(self.array[0])] * len(self.array)
        for i in range(1, len(self.array)):
            sum_value[i] = max(int(self.array[i]) + sum_value[i - 1], int(self.array[i]))
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]

    # Keep the original (mangled) method name working for any existing caller.
    snake_case_ = solve_sub_array


# The `__main__` block below instantiates the class under this name.
SubArray = UpperCAmelCase__
if __name__ == "__main__":
    # BUG FIX: the original bound every value to the same throwaway name
    # `_lowerCAmelCase` while the following lines read `whole_array`, `array`
    # and `re`; the final line also had dataset-table residue (`| 306 | 0 |`)
    # fused onto it, which made the file unparsable.
    whole_array = input("please input some numbers:")
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print(("the results is:", re))
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
"The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"
)
# BUG FIX: these three module-level bindings were all mangled to the single
# name `UpperCAmelCase__` (each shadowing the previous), while the conversion
# code below reads `NUM_SHARDS`.  The conventional names are restored; the
# first `None` binding is preserved under its original name.
UpperCAmelCase__ = None

# Intermediate (FFN) sizes per Llama model size, for reference/validation.
INTERMEDIATE_SIZE_MAP = {
    "7B": 11008,
    "13B": 13824,
    "30B": 17920,
    "65B": 22016,
    "70B": 28672,
}

# Number of checkpoint shards shipped for each model size.
NUM_SHARDS = {
    "7B": 1,
    "7Bf": 1,
    "13B": 2,
    "13Bf": 2,
    "30B": 4,
    "65B": 8,
    "70B": 8,
    "70Bf": 8,
}
def _a ( a :List[str] , a :Any=1 , a :Optional[Any]=256 ) -> Any:
return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of)
def _a ( a :List[Any] ) -> List[str]:
with open(snake_case_ , '''r''' ) as f:
return json.load(snake_case_ )
def _a ( a :Any , a :str ) -> Optional[int]:
with open(snake_case_ , '''w''' ) as f:
json.dump(snake_case_ , snake_case_ )
def _a ( a :List[Any] , a :List[str] , a :List[Any] , a :Tuple=True ) -> List[str]:
    """Convert a raw Llama checkpoint directory into HF Transformers format.

    NOTE(review): this block is heavily name-mangled — the four parameters all
    share the name ``a`` (a SyntaxError as written) and most locals were
    rewritten to ``a``/``snake_case_`` while later lines read the real names
    (``params``, ``loaded``, ``state_dict``, ...).  Restore the conventional
    parameter names (model_path, input_base_path, model_size,
    safe_serialization) and local names before running.
    """
    os.makedirs(snake_case_ , exist_ok=snake_case_ )
    # Shards are first written to a tmp sub-directory, then reloaded and
    # re-saved by transformers at the end.
    a = os.path.join(snake_case_ , '''tmp''' )
    os.makedirs(snake_case_ , exist_ok=snake_case_ )
    # Read the original hyper-parameters shipped next to the weights.
    a = read_json(os.path.join(snake_case_ , '''params.json''' ) )
    a = NUM_SHARDS[model_size]
    a = params['''n_layers''']
    a = params['''n_heads''']
    a = n_heads // num_shards
    a = params['''dim''']
    a = dim // n_heads
    a = 10_000.0
    # Rotary embedding inverse frequencies, one per pair of head dimensions.
    a = 1.0 / (base ** (torch.arange(0 , snake_case_ , 2 ).float() / dims_per_head))

    if "n_kv_heads" in params:
        a = params['''n_kv_heads'''] # for GQA / MQA
        a = n_heads_per_shard // num_key_value_heads
        a = dim // num_key_value_heads
    else: # compatibility with other checkpoints
        a = n_heads
        a = n_heads_per_shard
        a = dim

    # permute for sliced rotary
    def permute(a :Optional[int] , a :Optional[Any]=n_heads , a :Optional[int]=dim , a :Dict=dim ):
        return w.view(snake_case_ , dima // n_heads // 2 , 2 , snake_case_ ).transpose(1 , 2 ).reshape(snake_case_ , snake_case_ )

    print(F"""Fetching all parameters from the checkpoint at {input_base_path}.""" )
    # Load weights
    if model_size == "7B":
        # Not sharded
        # (The sharded implementation would also work, but this is simpler.)
        a = torch.load(os.path.join(snake_case_ , '''consolidated.00.pth''' ) , map_location='''cpu''' )
    else:
        # Sharded
        a = [
            torch.load(os.path.join(snake_case_ , F"""consolidated.{i:02d}.pth""" ) , map_location='''cpu''' )
            for i in range(snake_case_ )
        ]
    a = 0
    a = {'''weight_map''': {}}
    # Convert one transformer layer per output shard file.
    for layer_i in range(snake_case_ ):
        a = F"""pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"""
        if model_size == "7B":
            # Unsharded
            a = {
                F"""model.layers.{layer_i}.self_attn.q_proj.weight""": permute(
                    loaded[F"""layers.{layer_i}.attention.wq.weight"""] ),
                F"""model.layers.{layer_i}.self_attn.k_proj.weight""": permute(
                    loaded[F"""layers.{layer_i}.attention.wk.weight"""] ),
                F"""model.layers.{layer_i}.self_attn.v_proj.weight""": loaded[F"""layers.{layer_i}.attention.wv.weight"""],
                F"""model.layers.{layer_i}.self_attn.o_proj.weight""": loaded[F"""layers.{layer_i}.attention.wo.weight"""],
                F"""model.layers.{layer_i}.mlp.gate_proj.weight""": loaded[F"""layers.{layer_i}.feed_forward.w1.weight"""],
                F"""model.layers.{layer_i}.mlp.down_proj.weight""": loaded[F"""layers.{layer_i}.feed_forward.w2.weight"""],
                F"""model.layers.{layer_i}.mlp.up_proj.weight""": loaded[F"""layers.{layer_i}.feed_forward.w3.weight"""],
                F"""model.layers.{layer_i}.input_layernorm.weight""": loaded[F"""layers.{layer_i}.attention_norm.weight"""],
                F"""model.layers.{layer_i}.post_attention_layernorm.weight""": loaded[F"""layers.{layer_i}.ffn_norm.weight"""],
            }
        else:
            # Sharded
            # Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
            # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
            # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
            a = {
                F"""model.layers.{layer_i}.input_layernorm.weight""": loaded[0][
                    F"""layers.{layer_i}.attention_norm.weight"""
                ].clone(),
                F"""model.layers.{layer_i}.post_attention_layernorm.weight""": loaded[0][
                    F"""layers.{layer_i}.ffn_norm.weight"""
                ].clone(),
            }
            a = permute(
                torch.cat(
                    [
                        loaded[i][F"""layers.{layer_i}.attention.wq.weight"""].view(snake_case_ , snake_case_ , snake_case_ )
                        for i in range(snake_case_ )
                    ] , dim=0 , ).reshape(snake_case_ , snake_case_ ) )
            a = permute(
                torch.cat(
                    [
                        loaded[i][F"""layers.{layer_i}.attention.wk.weight"""].view(
                            snake_case_ , snake_case_ , snake_case_ )
                        for i in range(snake_case_ )
                    ] , dim=0 , ).reshape(snake_case_ , snake_case_ ) , snake_case_ , snake_case_ , snake_case_ , )
            a = torch.cat(
                [
                    loaded[i][F"""layers.{layer_i}.attention.wv.weight"""].view(
                        snake_case_ , snake_case_ , snake_case_ )
                    for i in range(snake_case_ )
                ] , dim=0 , ).reshape(snake_case_ , snake_case_ )
            a = torch.cat(
                [loaded[i][F"""layers.{layer_i}.attention.wo.weight"""] for i in range(snake_case_ )] , dim=1 )
            a = torch.cat(
                [loaded[i][F"""layers.{layer_i}.feed_forward.w1.weight"""] for i in range(snake_case_ )] , dim=0 )
            a = torch.cat(
                [loaded[i][F"""layers.{layer_i}.feed_forward.w2.weight"""] for i in range(snake_case_ )] , dim=1 )
            a = torch.cat(
                [loaded[i][F"""layers.{layer_i}.feed_forward.w3.weight"""] for i in range(snake_case_ )] , dim=0 )

        a = inv_freq
        # Record which output file each tensor lives in and tally parameters.
        for k, v in state_dict.items():
            a = filename
            param_count += v.numel()
        torch.save(snake_case_ , os.path.join(snake_case_ , snake_case_ ) )

    # Final shard: embeddings, final norm and LM head.
    a = F"""pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"""
    if model_size == "7B":
        # Unsharded
        a = {
            '''model.embed_tokens.weight''': loaded['''tok_embeddings.weight'''],
            '''model.norm.weight''': loaded['''norm.weight'''],
            '''lm_head.weight''': loaded['''output.weight'''],
        }
    else:
        a = {
            '''model.norm.weight''': loaded[0]['''norm.weight'''],
            '''model.embed_tokens.weight''': torch.cat(
                [loaded[i]['''tok_embeddings.weight'''] for i in range(snake_case_ )] , dim=1 ),
            '''lm_head.weight''': torch.cat([loaded[i]['''output.weight'''] for i in range(snake_case_ )] , dim=0 ),
        }

    for k, v in state_dict.items():
        a = filename
        param_count += v.numel()
    torch.save(snake_case_ , os.path.join(snake_case_ , snake_case_ ) )

    # Write configs
    a = {'''total_size''': param_count * 2}
    write_json(snake_case_ , os.path.join(snake_case_ , '''pytorch_model.bin.index.json''' ) )
    a = params['''ffn_dim_multiplier'''] if '''ffn_dim_multiplier''' in params else 1
    a = params['''multiple_of'''] if '''multiple_of''' in params else 256
    a = LlamaConfig(
        hidden_size=snake_case_ , intermediate_size=compute_intermediate_size(snake_case_ , snake_case_ , snake_case_ ) , num_attention_heads=params['''n_heads'''] , num_hidden_layers=params['''n_layers'''] , rms_norm_eps=params['''norm_eps'''] , num_key_value_heads=snake_case_ , )
    config.save_pretrained(snake_case_ )

    # Make space so we can load the model properly now.
    del state_dict
    del loaded
    gc.collect()

    print('''Loading the checkpoint in a Llama model.''' )
    a = LlamaForCausalLM.from_pretrained(snake_case_ , torch_dtype=torch.floataa , low_cpu_mem_usage=snake_case_ )
    # Avoid saving this as part of the config.
    del model.config._name_or_path

    print('''Saving in the Transformers format.''' )
    model.save_pretrained(snake_case_ , safe_serialization=snake_case_ )
    shutil.rmtree(snake_case_ )
def _a(tokenizer_path, input_tokenizer_path):
    """Convert a Llama sentencepiece tokenizer and save it in HF format.

    Args:
        tokenizer_path: Output directory for the converted tokenizer.
        input_tokenizer_path: Path to the original ``tokenizer.model`` file.

    BUG FIX: the original declared both parameters as ``a`` and referenced the
    undefined names ``tokenizer_class``/``snake_case_`` in the body.
    """
    # Prefer the fast tokenizer; `LlamaTokenizerFast` is None when the
    # installed `tokenizers` library does not support the conversion.
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(f"Saving a {tokenizer_class.__name__} to {tokenizer_path}.")
    tokenizer = tokenizer_class(input_tokenizer_path)
    tokenizer.save_pretrained(tokenizer_path)
def _a():
    """CLI entry point: convert Llama weights and/or tokenizer to HF format."""
    # BUG FIX: the original bound the parser/args to throwaway mangled names
    # and passed `type=snake_case_` (undefined) for --safe_serialization.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--input_dir",
        help="Location of LLaMA weights, which contains tokenizer.model and model folders",
    )
    parser.add_argument(
        "--model_size",
        choices=["7B", "7Bf", "13B", "13Bf", "30B", "65B", "70B", "70Bf", "tokenizer_only"],
    )
    parser.add_argument(
        "--output_dir",
        help="Location to write HF model and tokenizer",
    )
    parser.add_argument("--safe_serialization", type=bool, help="Whether or not to save using `safetensors`.")
    args = parser.parse_args()
    if args.model_size != "tokenizer_only":
        # NOTE(review): `write_model`/`write_tokenizer` are the conventional
        # names of the helpers above (mangled to `_a` in this file) — confirm.
        write_model(
            model_path=args.output_dir,
            input_base_path=os.path.join(args.input_dir, args.model_size),
            model_size=args.model_size,
            safe_serialization=args.safe_serialization,
        )
    spm_path = os.path.join(args.input_dir, "tokenizer.model")
    write_tokenizer(args.output_dir, spm_path)


# Alias so the mangled `main()` call in the entry-point guard resolves.
main = _a

if __name__ == "__main__":
    main()
| 117 |
"""simple docstring"""
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
def __UpperCAmelCase(input_image: np.ndarray, output_size, keep_aspect_ratio: bool, multiple: int) -> Tuple[int, int]:
    """Compute the DPT output size for `input_image`, constrained to multiples.

    Args:
        input_image: Image whose (height, width) is read via ``get_image_size``.
        output_size: Target size — an int (square) or a (height, width) pair.
        keep_aspect_ratio: If True, scale both sides by the single scale factor
            closest to 1, preserving the input aspect ratio.
        multiple: Each returned dimension is rounded to a multiple of this.

    Returns:
        ``(new_height, new_width)``.

    BUG FIX: the original declared all four parameters — and all four
    parameters of the inner helper — with the same mangled name
    (``snake_case_``), a SyntaxError; names are restored from the body.
    """
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        # Round to the nearest multiple, then push back inside [min_val, max_val].
        x = round(val / multiple) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple
        if x < min_val:
            x = math.ceil(val / multiple) * multiple
        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size

    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)
class __lowerCamelCase ( __lowercase ):
__UpperCamelCase = ['pixel_values']
    def __init__(self , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = PILImageResampling.BILINEAR , lowerCamelCase = False , lowerCamelCase = 1 , lowerCamelCase = True , lowerCamelCase = 1 / 255 , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = None , **lowerCamelCase , ):
        """Configure the image processor's resize/rescale/normalize defaults.

        NOTE(review): every parameter was mangled to the single name
        ``lowerCamelCase`` (a SyntaxError as written), while the body reads the
        real names (do_resize, size, resample, keep_aspect_ratio,
        ensure_multiple_of, do_rescale, rescale_factor, do_normalize,
        image_mean, image_std) — restore them before running.
        """
        super().__init__(**lowerCamelCase )
        # Default output size is 384x384 when none is provided.
        _lowerCAmelCase = size if size is not None else {"""height""": 384, """width""": 384}
        _lowerCAmelCase = get_size_dict(lowerCamelCase )
        _lowerCAmelCase = do_resize
        _lowerCAmelCase = size
        _lowerCAmelCase = keep_aspect_ratio
        _lowerCAmelCase = ensure_multiple_of
        _lowerCAmelCase = resample
        _lowerCAmelCase = do_rescale
        _lowerCAmelCase = rescale_factor
        _lowerCAmelCase = do_normalize
        # Fall back to the ImageNet normalization statistics.
        _lowerCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        _lowerCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase = False , lowerCamelCase = 1 , lowerCamelCase = PILImageResampling.BICUBIC , lowerCamelCase = None , **lowerCamelCase , ):
        """Resize an image to ``size``, optionally keeping aspect ratio and
        rounding each dimension to a multiple.

        NOTE(review): the parameters were mangled to the single repeated name
        ``lowerCamelCase`` (a SyntaxError as written); the body reads the real
        names (image, size, keep_aspect_ratio, multiple, resample,
        data_format) — restore them before running.

        Raises:
            ValueError: If ``size`` lacks the ``height``/``width`` keys.
        """
        _lowerCAmelCase = get_size_dict(lowerCamelCase )
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
        # Delegate the multiple-of / aspect-ratio arithmetic to the helper above.
        _lowerCAmelCase = get_resize_output_image_size(
            lowerCamelCase , output_size=(size["""height"""], size["""width"""]) , keep_aspect_ratio=lowerCamelCase , multiple=lowerCamelCase , )
        return resize(lowerCamelCase , size=lowerCamelCase , resample=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
    def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase , ):
        """Thin wrapper over the module-level ``rescale`` transform.

        NOTE(review): parameters mangled to one repeated name (image, scale,
        data_format were presumably meant) — restore before running.
        """
        return rescale(lowerCamelCase , scale=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
    def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase , ):
        """Thin wrapper over the module-level ``normalize`` transform.

        NOTE(review): parameters mangled to one repeated name (image, mean,
        std, data_format were presumably meant) — restore before running.
        """
        return normalize(lowerCamelCase , mean=lowerCamelCase , std=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def A__ (self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = ChannelDimension.FIRST , **lowerCamelCase , ):
'''simple docstring'''
_lowerCAmelCase = do_resize if do_resize is not None else self.do_resize
_lowerCAmelCase = size if size is not None else self.size
_lowerCAmelCase = get_size_dict(lowerCamelCase )
_lowerCAmelCase = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
_lowerCAmelCase = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
_lowerCAmelCase = resample if resample is not None else self.resample
_lowerCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
_lowerCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowerCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
_lowerCAmelCase = image_mean if image_mean is not None else self.image_mean
_lowerCAmelCase = image_std if image_std is not None else self.image_std
_lowerCAmelCase = make_list_of_images(lowerCamelCase )
if not valid_images(lowerCamelCase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None or resample is None:
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
_lowerCAmelCase = [to_numpy_array(lowerCamelCase ) for image in images]
if do_resize:
_lowerCAmelCase = [self.resize(image=lowerCamelCase , size=lowerCamelCase , resample=lowerCamelCase ) for image in images]
if do_rescale:
_lowerCAmelCase = [self.rescale(image=lowerCamelCase , scale=lowerCamelCase ) for image in images]
if do_normalize:
_lowerCAmelCase = [self.normalize(image=lowerCamelCase , mean=lowerCamelCase , std=lowerCamelCase ) for image in images]
_lowerCAmelCase = [to_channel_dimension_format(lowerCamelCase , lowerCamelCase ) for image in images]
_lowerCAmelCase = {"""pixel_values""": images}
return BatchFeature(data=lowerCamelCase , tensor_type=lowerCamelCase )
def A__ (self , lowerCamelCase , lowerCamelCase = None ):
'''simple docstring'''
_lowerCAmelCase = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(lowerCamelCase ) != len(lowerCamelCase ):
raise ValueError(
"""Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
if is_torch_tensor(lowerCamelCase ):
_lowerCAmelCase = target_sizes.numpy()
_lowerCAmelCase = []
for idx in range(len(lowerCamelCase ) ):
_lowerCAmelCase = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="""bilinear""" , align_corners=lowerCamelCase )
_lowerCAmelCase = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(lowerCamelCase )
else:
_lowerCAmelCase = logits.argmax(dim=1 )
_lowerCAmelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation | 156 | 0 |
"""simple docstring"""
def A__ ( ) -> list[list[int]]:
'''simple docstring'''
return [list(range(10_00 - i , -10_00 - i , -1 ) ) for i in range(10_00 )]
lowercase = generate_large_matrix()
lowercase = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def A__ ( _UpperCAmelCase : list[list[int]] ) -> None:
'''simple docstring'''
assert all(row == sorted(_UpperCAmelCase , reverse=_UpperCAmelCase ) for row in grid )
assert all(list(_UpperCAmelCase ) == sorted(_UpperCAmelCase , reverse=_UpperCAmelCase ) for col in zip(*_UpperCAmelCase ) )
def A__ ( _UpperCAmelCase : list[int] ) -> int:
'''simple docstring'''
snake_case__ : Tuple = 0
snake_case__ : List[Any] = len(_UpperCAmelCase ) - 1
# Edge cases such as no values or all numbers are negative.
if not array or array[0] < 0:
return 0
while right + 1 > left:
snake_case__ : Tuple = (left + right) // 2
snake_case__ : Optional[Any] = array[mid]
# Num must be negative and the index must be greater than or equal to 0.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
snake_case__ : Optional[Any] = mid + 1
else:
snake_case__ : Any = mid - 1
# No negative numbers so return the last index of the array + 1 which is the length.
return len(_UpperCAmelCase )
def A__ ( _UpperCAmelCase : list[list[int]] ) -> int:
'''simple docstring'''
snake_case__ : Any = 0
snake_case__ : Any = len(grid[0] )
for i in range(len(_UpperCAmelCase ) ):
snake_case__ : str = find_negative_index(grid[i][:bound] )
total += bound
return (len(_UpperCAmelCase ) * len(grid[0] )) - total
def A__ ( _UpperCAmelCase : list[list[int]] ) -> int:
'''simple docstring'''
return len([number for row in grid for number in row if number < 0] )
def A__ ( _UpperCAmelCase : list[list[int]] ) -> int:
'''simple docstring'''
snake_case__ : Optional[Any] = 0
for row in grid:
for i, number in enumerate(_UpperCAmelCase ):
if number < 0:
total += len(_UpperCAmelCase ) - i
break
return total
def A__ ( ) -> None:
'''simple docstring'''
from timeit import timeit
print("Running benchmarks" )
snake_case__ : List[Any] = (
"from __main__ import count_negatives_binary_search, "
"count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
)
for func in (
"count_negatives_binary_search", # took 0.7727 seconds
"count_negatives_brute_force_with_break", # took 4.6505 seconds
"count_negatives_brute_force", # took 12.8160 seconds
):
snake_case__ : Optional[int] = timeit(F"""{func}(grid=grid)""" , setup=_UpperCAmelCase , number=5_00 )
print(F"""{func}() took {time:0.4f} seconds""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 701 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class SCREAMING_SNAKE_CASE_ ( _lowercase):
'''simple docstring'''
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
snake_case__ : List[Any] = tempfile.mkdtemp()
snake_case__ : Tuple = 8
# DPR tok
snake_case__ : List[str] = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
snake_case__ : List[Any] = os.path.join(self.tmpdirname , "dpr_tokenizer")
os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__)
snake_case__ : Optional[int] = os.path.join(lowerCamelCase__ , DPR_VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
# BART tok
snake_case__ : Optional[int] = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
snake_case__ : str = dict(zip(lowerCamelCase__ , range(len(lowerCamelCase__))))
snake_case__ : Tuple = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
snake_case__ : List[Any] = {"unk_token": "<unk>"}
snake_case__ : Tuple = os.path.join(self.tmpdirname , "bart_tokenizer")
os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__)
snake_case__ : Any = os.path.join(lowerCamelCase__ , BART_VOCAB_FILES_NAMES["vocab_file"])
snake_case__ : Optional[Any] = os.path.join(lowerCamelCase__ , BART_VOCAB_FILES_NAMES["merges_file"])
with open(self.vocab_file , "w" , encoding="utf-8") as fp:
fp.write(json.dumps(lowerCamelCase__) + "\n")
with open(self.merges_file , "w" , encoding="utf-8") as fp:
fp.write("\n".join(lowerCamelCase__))
def UpperCAmelCase ( self) -> DPRQuestionEncoderTokenizer:
'''simple docstring'''
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer"))
def UpperCAmelCase ( self) -> BartTokenizer:
'''simple docstring'''
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , "bart_tokenizer"))
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
@require_tokenizers
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
snake_case__ : Any = os.path.join(self.tmpdirname , "rag_tokenizer")
snake_case__ : Any = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict())
snake_case__ : Dict = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer())
rag_config.save_pretrained(lowerCamelCase__)
rag_tokenizer.save_pretrained(lowerCamelCase__)
snake_case__ : Optional[int] = RagTokenizer.from_pretrained(lowerCamelCase__ , config=lowerCamelCase__)
self.assertIsInstance(new_rag_tokenizer.question_encoder , lowerCamelCase__)
self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab())
self.assertIsInstance(new_rag_tokenizer.generator , lowerCamelCase__)
self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab())
@slow
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
snake_case__ : Optional[Any] = RagTokenizer.from_pretrained("facebook/rag-token-nq")
snake_case__ : List[str] = [
"who got the first nobel prize in physics",
"when is the next deadpool movie being released",
"which mode is used for short wave broadcast service",
"who is the owner of reading football club",
"when is the next scandal episode coming out",
"when is the last time the philadelphia won the superbowl",
"what is the most current adobe flash player version",
"how many episodes are there in dragon ball z",
"what is the first step in the evolution of the eye",
"where is gall bladder situated in human body",
"what is the main mineral in lithium batteries",
"who is the president of usa right now",
"where do the greasers live in the outsiders",
"panda is a national animal of which country",
"what is the name of manchester united stadium",
]
snake_case__ : Union[str, Any] = tokenizer(lowerCamelCase__)
self.assertIsNotNone(lowerCamelCase__)
@slow
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
snake_case__ : Any = RagTokenizer.from_pretrained("facebook/rag-sequence-nq")
snake_case__ : Tuple = [
"who got the first nobel prize in physics",
"when is the next deadpool movie being released",
"which mode is used for short wave broadcast service",
"who is the owner of reading football club",
"when is the next scandal episode coming out",
"when is the last time the philadelphia won the superbowl",
"what is the most current adobe flash player version",
"how many episodes are there in dragon ball z",
"what is the first step in the evolution of the eye",
"where is gall bladder situated in human body",
"what is the main mineral in lithium batteries",
"who is the president of usa right now",
"where do the greasers live in the outsiders",
"panda is a national animal of which country",
"what is the name of manchester united stadium",
]
snake_case__ : Optional[Any] = tokenizer(lowerCamelCase__)
self.assertIsNotNone(lowerCamelCase__)
| 150 | 0 |
"""simple docstring"""
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
def __init__( self : Any ,A_ : Callable ,A_ : Optional[Features] = None ,A_ : str = None ,A_ : bool = False ,A_ : bool = False ,A_ : Optional[dict] = None ,A_ : Optional[int] = None ,**A_ : int ,) -> str:
super().__init__(
features=A_ ,cache_dir=A_ ,keep_in_memory=A_ ,streaming=A_ ,num_proc=A_ ,**A_ ,)
A = Generator(
cache_dir=A_ ,features=A_ ,generator=A_ ,gen_kwargs=A_ ,**A_ ,)
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]:
# Build iterable dataset
if self.streaming:
A = self.builder.as_streaming_dataset(split='train' )
# Build regular (map-style) dataset
else:
A = None
A = None
A = None
A = None
self.builder.download_and_prepare(
download_config=A_ ,download_mode=A_ ,verification_mode=A_ ,base_path=A_ ,num_proc=self.num_proc ,)
A = self.builder.as_dataset(
split='train' ,verification_mode=A_ ,in_memory=self.keep_in_memory )
return dataset | 91 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowercase = {
'''configuration_xlm_roberta_xl''': [
'''XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XLMRobertaXLConfig''',
'''XLMRobertaXLOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMRobertaXLForCausalLM''',
'''XLMRobertaXLForMaskedLM''',
'''XLMRobertaXLForMultipleChoice''',
'''XLMRobertaXLForQuestionAnswering''',
'''XLMRobertaXLForSequenceClassification''',
'''XLMRobertaXLForTokenClassification''',
'''XLMRobertaXLModel''',
'''XLMRobertaXLPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure) | 91 | 1 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class __magic_name__ ( lowerCAmelCase ):
def __init__( self , snake_case , snake_case , snake_case) -> str:
'''simple docstring'''
_UpperCAmelCase : List[str] =dataset
_UpperCAmelCase : int =process
_UpperCAmelCase : Union[str, Any] =params
def __len__( self) -> List[str]:
'''simple docstring'''
return len(self.dataset)
def __getitem__( self , snake_case) -> List[str]:
'''simple docstring'''
_UpperCAmelCase : Any =self.dataset[i]
_UpperCAmelCase : List[Any] =self.process(snake_case , **self.params)
return processed
class __magic_name__ ( lowerCAmelCase ):
def __init__( self , snake_case , snake_case , snake_case , snake_case=None) -> Any:
'''simple docstring'''
_UpperCAmelCase : Dict =loader
_UpperCAmelCase : str =infer
_UpperCAmelCase : Tuple =params
if loader_batch_size == 1:
# Let's spare some time by deactivating altogether
_UpperCAmelCase : List[str] =None
_UpperCAmelCase : Dict =loader_batch_size
# Internal bookkeeping
_UpperCAmelCase : Union[str, Any] =None
_UpperCAmelCase : Dict =None
def __len__( self) -> Dict:
'''simple docstring'''
return len(self.loader)
def __iter__( self) -> Tuple:
'''simple docstring'''
_UpperCAmelCase : int =iter(self.loader)
return self
def lowerCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
if isinstance(self._loader_batch_data , torch.Tensor):
# Batch data is simple tensor, just fetch the slice
_UpperCAmelCase : Any =self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
_UpperCAmelCase : Optional[int] ={}
for k, element in self._loader_batch_data.items():
if isinstance(snake_case , snake_case):
# Convert ModelOutput to tuple first
_UpperCAmelCase : Union[str, Any] =element.to_tuple()
if isinstance(element[0] , torch.Tensor):
_UpperCAmelCase : Optional[int] =tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
elif isinstance(element[0] , np.ndarray):
_UpperCAmelCase : Optional[Any] =tuple(np.expand_dims(el[self._loader_batch_index] , 0) for el in element)
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(snake_case , snake_case):
# Those are stored as lists of tensors so need specific unbatching.
if isinstance(element[0] , torch.Tensor):
_UpperCAmelCase : Union[str, Any] =tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
elif isinstance(element[0] , np.ndarray):
_UpperCAmelCase : Optional[int] =tuple(np.expand_dims(el[self._loader_batch_index] , 0) for el in element)
continue
if element is None:
# This can happen for optional data that get passed around
_UpperCAmelCase : List[Any] =None
elif isinstance(element[self._loader_batch_index] , torch.Tensor):
# Take correct batch data, but make it looked like batch_size=1
# For compatibility with other methods within transformers
_UpperCAmelCase : Tuple =element[self._loader_batch_index].unsqueeze(0)
elif isinstance(element[self._loader_batch_index] , np.ndarray):
# Take correct batch data, but make it looked like batch_size=1
# For compatibility with other methods within transformers
_UpperCAmelCase : List[str] =np.expand_dims(element[self._loader_batch_index] , 0)
else:
# This is typically a list, so no need to `unsqueeze`.
_UpperCAmelCase : str =element[self._loader_batch_index]
# Recreate the element by reusing the original class to make it look
# batch_size=1
_UpperCAmelCase : Optional[Any] =self._loader_batch_data.__class__(snake_case)
self._loader_batch_index += 1
return result
def lowerCAmelCase ( self) -> int:
'''simple docstring'''
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
_UpperCAmelCase : Dict =next(self.iterator)
_UpperCAmelCase : List[Any] =self.infer(snake_case , **self.params)
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(snake_case , torch.Tensor):
_UpperCAmelCase : Optional[int] =processed
else:
_UpperCAmelCase : Tuple =list(processed.keys())[0]
_UpperCAmelCase : List[Any] =processed[key]
if isinstance(snake_case , snake_case):
_UpperCAmelCase : str =len(snake_case)
else:
_UpperCAmelCase : List[Any] =first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
_UpperCAmelCase : int =observed_batch_size
# Setting internal index to unwrap the batch
_UpperCAmelCase : Tuple =processed
_UpperCAmelCase : Optional[Any] =0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
class __magic_name__ ( lowerCAmelCase ):
def __init__( self , snake_case , snake_case , snake_case , snake_case=None) -> Any:
'''simple docstring'''
super().__init__(snake_case , snake_case , snake_case)
def __iter__( self) -> int:
'''simple docstring'''
_UpperCAmelCase : Dict =iter(self.loader)
_UpperCAmelCase : Optional[int] =None
return self
def lowerCAmelCase ( self) -> List[Any]:
'''simple docstring'''
if self.subiterator is None:
_UpperCAmelCase : Tuple =self.infer(next(self.iterator) , **self.params)
try:
# Try to return next item
_UpperCAmelCase : Optional[int] =next(self.subiterator)
except StopIteration:
# When a preprocess iterator ends, we can start lookig at the next item
# ChunkIterator will keep feeding until ALL elements of iterator
# all have created their subiterator and have been iterating against.
#
# Another way to look at it, is we're basically flattening lists of lists
# into a single list, but with generators
_UpperCAmelCase : List[Any] =self.infer(next(self.iterator) , **self.params)
_UpperCAmelCase : List[str] =next(self.subiterator)
return processed
class __magic_name__ ( lowerCAmelCase ):
def __iter__( self) -> List[Any]:
'''simple docstring'''
_UpperCAmelCase : List[Any] =iter(self.loader)
return self
def lowerCAmelCase ( self) -> Tuple:
'''simple docstring'''
# Extremely similar to PipelineIterator in its unpacking mechanism
# BUT, we have an extra required item which is the presence of `is_last`
# That is because everything is flattened by `PipelineChunkIterator` we
# need to keep track of how to regroup here in the original `process`
# boundaries so that `process` and `postprocess` see the same data.
# This iterator accumulates items (possibly while unbatching) until it
# its a `is_last` and then just passes it on to the caller.
_UpperCAmelCase : List[str] =False
_UpperCAmelCase : int =[]
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
while self._loader_batch_index < self.loader_batch_size:
_UpperCAmelCase : Optional[int] =self.loader_batch_item()
_UpperCAmelCase : Any =item.pop('is_last')
accumulator.append(snake_case)
if is_last:
return accumulator
while not is_last:
_UpperCAmelCase : Union[str, Any] =self.infer(next(self.iterator) , **self.params)
if self.loader_batch_size is not None:
if isinstance(snake_case , torch.Tensor):
_UpperCAmelCase : Dict =processed
else:
_UpperCAmelCase : List[str] =list(processed.keys())[0]
_UpperCAmelCase : Dict =processed[key]
if isinstance(snake_case , snake_case):
_UpperCAmelCase : int =len(snake_case)
else:
_UpperCAmelCase : Optional[Any] =first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
_UpperCAmelCase : Tuple =observed_batch_size
_UpperCAmelCase : Optional[Any] =processed
_UpperCAmelCase : Tuple =0
while self._loader_batch_index < self.loader_batch_size:
_UpperCAmelCase : Optional[int] =self.loader_batch_item()
_UpperCAmelCase : int =item.pop('is_last')
accumulator.append(snake_case)
if is_last:
return accumulator
else:
_UpperCAmelCase : Optional[int] =processed
_UpperCAmelCase : Any =item.pop('is_last')
accumulator.append(snake_case)
return accumulator
class __magic_name__ ( lowerCAmelCase ):
def __init__( self , snake_case , snake_case) -> List[str]:
'''simple docstring'''
_UpperCAmelCase : Tuple =dataset
_UpperCAmelCase : Optional[int] =key
def __len__( self) -> Optional[Any]:
'''simple docstring'''
return len(self.dataset)
def __getitem__( self , snake_case) -> Tuple:
'''simple docstring'''
return self.dataset[i][self.key]
class __magic_name__ ( lowerCAmelCase ):
def __init__( self , snake_case , snake_case , snake_case) -> int:
'''simple docstring'''
_UpperCAmelCase : str =dataset
_UpperCAmelCase : Any =keya
_UpperCAmelCase : List[str] =keya
def __len__( self) -> str:
'''simple docstring'''
return len(self.dataset)
def __getitem__( self , snake_case) -> Optional[int]:
'''simple docstring'''
return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
| 331 |
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
lowercase =random.Random()
if is_torch_available():
import torch
def lowerCamelCase__ ( __lowerCamelCase : Optional[Any] , __lowerCamelCase : int=1.0 , __lowerCamelCase : str=None , __lowerCamelCase : Tuple=None ):
'''simple docstring'''
if rng is None:
_UpperCAmelCase : Optional[Any] =global_rng
_UpperCAmelCase : Optional[int] =[]
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class __magic_name__ ( unittest.TestCase ):
def __init__( self , snake_case , snake_case=7 , snake_case=4_0_0 , snake_case=2_0_0_0 , snake_case=1 , snake_case=0.0 , snake_case=1_6_0_0_0 , snake_case=True , snake_case=True , ) -> int:
'''simple docstring'''
_UpperCAmelCase : int =parent
_UpperCAmelCase : Any =batch_size
_UpperCAmelCase : Tuple =min_seq_length
_UpperCAmelCase : Tuple =max_seq_length
_UpperCAmelCase : Optional[Any] =(self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_UpperCAmelCase : int =feature_size
_UpperCAmelCase : List[str] =padding_value
_UpperCAmelCase : int =sampling_rate
_UpperCAmelCase : List[str] =return_attention_mask
_UpperCAmelCase : Tuple =do_normalize
def lowerCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def lowerCAmelCase ( self , snake_case=False , snake_case=False) -> Any:
'''simple docstring'''
def _flatten(snake_case):
return list(itertools.chain(*snake_case))
if equal_length:
_UpperCAmelCase : List[Any] =floats_list((self.batch_size, self.max_seq_length))
else:
# make sure that inputs increase in size
_UpperCAmelCase : Optional[Any] =[
_flatten(floats_list((x, self.feature_size)))
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff)
]
if numpify:
_UpperCAmelCase : Optional[int] =[np.asarray(snake_case) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class __magic_name__ ( lowerCAmelCase ,unittest.TestCase ):
UpperCAmelCase =ASTFeatureExtractor
def lowerCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Tuple =ASTFeatureExtractionTester(self)
def lowerCAmelCase ( self) -> Tuple:
'''simple docstring'''
# Tests that all call wrap to encode_plus and batch_encode_plus
_UpperCAmelCase : str =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
# create three inputs of length 800, 1000, and 1200
_UpperCAmelCase : str =[floats_list((1, x))[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0)]
_UpperCAmelCase : Optional[int] =[np.asarray(snake_case) for speech_input in speech_inputs]
# Test not batched input
_UpperCAmelCase : List[str] =feat_extract(speech_inputs[0] , return_tensors='np').input_values
_UpperCAmelCase : List[Any] =feat_extract(np_speech_inputs[0] , return_tensors='np').input_values
self.assertTrue(np.allclose(snake_case , snake_case , atol=1E-3))
# Test batched
_UpperCAmelCase : Tuple =feat_extract(snake_case , padding=snake_case , return_tensors='np').input_values
_UpperCAmelCase : List[Any] =feat_extract(snake_case , padding=snake_case , return_tensors='np').input_values
for enc_seq_a, enc_seq_a in zip(snake_case , snake_case):
self.assertTrue(np.allclose(snake_case , snake_case , atol=1E-3))
# Test 2-D numpy arrays are batched.
_UpperCAmelCase : Dict =[floats_list((1, x))[0] for x in (8_0_0, 8_0_0, 8_0_0)]
_UpperCAmelCase : Tuple =np.asarray(snake_case)
_UpperCAmelCase : Optional[Any] =feat_extract(snake_case , return_tensors='np').input_values
_UpperCAmelCase : Dict =feat_extract(snake_case , return_tensors='np').input_values
for enc_seq_a, enc_seq_a in zip(snake_case , snake_case):
self.assertTrue(np.allclose(snake_case , snake_case , atol=1E-3))
@require_torch
def lowerCAmelCase ( self) -> List[Any]:
'''simple docstring'''
import torch
_UpperCAmelCase : Any =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
_UpperCAmelCase : int =np.random.rand(1_0_0).astype(np.floataa)
_UpperCAmelCase : str =np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_UpperCAmelCase : Optional[Any] =feature_extractor.pad([{'input_values': inputs}] , return_tensors='np')
self.assertTrue(np_processed.input_values.dtype == np.floataa)
_UpperCAmelCase : List[str] =feature_extractor.pad([{'input_values': inputs}] , return_tensors='pt')
self.assertTrue(pt_processed.input_values.dtype == torch.floataa)
def lowerCAmelCase ( self , snake_case) -> int:
'''simple docstring'''
from datasets import load_dataset
_UpperCAmelCase : Optional[Any] =load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation')
# automatic decoding with librispeech
_UpperCAmelCase : int =ds.sort('id').select(range(snake_case))[:num_samples]['audio']
return [x["array"] for x in speech_samples]
@require_torch
def lowerCAmelCase ( self) -> int:
'''simple docstring'''
# fmt: off
_UpperCAmelCase : List[str] =torch.tensor(
[-0.98_94, -1.27_76, -0.90_66, -1.27_76, -0.93_49, -1.26_09, -1.03_86, -1.27_76,
-1.15_61, -1.27_76, -1.20_52, -1.27_23, -1.21_90, -1.21_32, -1.27_76, -1.11_33,
-1.19_53, -1.13_43, -1.15_84, -1.22_03, -1.17_70, -1.24_74, -1.23_81, -1.19_36,
-0.92_70, -0.83_17, -0.80_49, -0.77_06, -0.75_65, -0.78_69])
# fmt: on
_UpperCAmelCase : Dict =self._load_datasamples(1)
_UpperCAmelCase : Optional[Any] =ASTFeatureExtractor()
_UpperCAmelCase : Optional[Any] =feature_extractor(snake_case , return_tensors='pt').input_values
self.assertEquals(input_values.shape , (1, 1_0_2_4, 1_2_8))
self.assertTrue(torch.allclose(input_values[0, 0, :3_0] , snake_case , atol=1E-4))
| 331 | 1 |
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is (as it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
# Sentinel used throughout this script for "run failed / metric unavailable";
# referenced below as `nan` (e.g. in process_run_single and process_results).
nan = float("nan")
class Tee:
    """Duplicate everything written to stdout into a log file.

    Installed via ``sys.stdout = Tee(report_fn)`` so that the benchmark's
    console output is also captured on disk; tqdm carriage-return redraws are
    stripped from the file copy so the log stays readable.
    """

    def __init__(self, filename):
        # keep a handle on the real stdout so we can still echo to the console
        self.stdout = sys.stdout
        self.file = open(filename, "a")

    def __getattr__(self, attr):
        # delegate everything we don't override (flush, isatty, ...) to stdout
        return getattr(self.stdout, attr)

    def write(self, msg):
        self.stdout.write(msg)
        # strip tqdm codes: drop everything up to the last \r on each line
        self.file.write(re.sub(r"^.*\r", "", msg, 0, re.M))
def get_original_command(max_width=80, full_python_path=False):
    """Reconstruct the command line this script was invoked with.

    Args:
        max_width: wrap the rebuilt command into shell-continuation lines of
            at most this width.
        full_python_path: if True keep the full path to the interpreter,
            otherwise only its basename.

    Returns:
        The command as a single string, with ``\\\\\\n`` line continuations.
    """
    cmd = []
    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f"{key}={val}")
    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/")[-1]
    cmd.append(python)
    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))
    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd) > 0:
        current_line += f"{cmd.pop(0)} "
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ""
    return "\\\n".join(lines)
def get_base_command(args, output_dir):
    """Normalize ``args.base_cmd`` and return it as an argv list.

    Unwraps multi-line input, forces ``--output_dir`` to *output_dir* and
    guarantees ``--overwrite_output_dir`` is present, then prepends the
    current interpreter so the benchmarked script runs under the same python.
    """
    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)
    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+", "", args.base_cmd)
    args.base_cmd += f" --output_dir {output_dir}"
    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+", "", args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"
    return [sys.executable] + shlex.split(args.base_cmd)
def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
    """Run one benchmark subprocess and return the metrics it produced.

    Saves the run's stdout/stderr under ``output_dir/log.<variation>.std*.txt``,
    then reads ``output_dir/all_results.json`` written by the benchmarked
    script.  On a non-zero exit code returns ``{target_metric_key: nan}``.
    """
    # enable this branch to debug everything but the run itself (fast, fake metrics)
    if 0:
        import random
        from time import sleep

        sleep(0)
        return dict(
            {k: random.uniform(0, 100) for k in metric_keys},
            **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6_666, 222.22_222_222])},
        )

    result = subprocess.run(cmd, capture_output=True, text=True)

    if verbose:
        print("STDOUT", result.stdout)
        print("STDERR", result.stderr)

    # save the streams
    prefix = variation.replace(" ", "-")
    with open(Path(output_dir) / f"log.{prefix}.stdout.txt", "w") as f:
        f.write(result.stdout)
    with open(Path(output_dir) / f"log.{prefix}.stderr.txt", "w") as f:
        f.write(result.stderr)

    if result.returncode != 0:
        if verbose:
            print("failed")
        return {target_metric_key: nan}

    with io.open(f"{output_dir}/all_results.json", "r", encoding="utf-8") as f:
        metrics = json.load(f)

    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def process_run(
    id,
    cmd,
    variation_key,
    variation,
    longest_variation_len,
    target_metric_key,
    report_metric_keys,
    repeat_times,
    output_dir,
    verbose,
):
    """Run one variation ``repeat_times`` times and average its metrics.

    Returns a dict of mean metric values (successful runs only) plus the
    variation string under ``variation_key``; if every run failed, the target
    metric is ``nan``.
    """
    results = []
    metrics = []
    preamble = f"{id}: {variation:<{longest_variation_len}}"
    outcome = f"{preamble}: "
    metric_keys = set(report_metric_keys + [target_metric_key])
    for i in tqdm(range(repeat_times), desc=preamble, leave=False):
        single_run_metrics = process_run_single(
            id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose
        )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result):
            metrics.append(single_run_metrics)
            results.append(result)
            outcome += "✓"
        else:
            outcome += "✘"
    # \33[2K erases the tqdm leftovers on the current console line
    outcome = f"\33[2K\r{outcome}"
    if len(metrics) > 0:
        # average over the successful runs only
        mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2)
        results_str = f"{outcome} {mean_target}"
        if len(results) > 1:
            # note: the original rounded the wrong variable here
            results_str += f" {tuple(round(x, 2) for x in results)}"
        print(results_str)
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome)
        return {variation_key: variation, target_metric_key: nan}
def get_versions():
    """Return a human-readable software/hardware summary for the report.

    NOTE(review): requires a CUDA device — ``torch.cuda.get_device_properties``
    raises if none is available, which matches the script's GPU-benchmark use.
    """
    properties = torch.cuda.get_device_properties(torch.device("cuda"))
    return f"""
Datetime    : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")}

Software:
transformers: {transformers.__version__}
torch       : {torch.__version__}
cuda        : {torch.version.cuda}
python      : {platform.python_version()}

Hardware:
{torch.cuda.device_count()} GPUs      : {properties.name}, {properties.total_memory / 2**30:0.2f}GB
"""
def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir):
    """Build and print the final comparison tables (github + console markdown).

    The baseline (100%) row is ``base_variation`` when given and found,
    otherwise the minimal target-metric value; a ``diff_%`` column is added
    relative to that sentinel.
    """
    df = pd.DataFrame(results)
    variation_key = "variation"
    diff_key = "diff_%"

    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0,
            axis="columns",
        )

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis="columns")  # reorder cols

    # capitalize
    df = df.rename(str.capitalize, axis="columns")

    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace("_", "<br>"), axis="columns")
    df_console = df.rename(lambda c: c.replace("_", "\n"), axis="columns")

    report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False, floatfmt=".2f")]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False, floatfmt=".2f")]

    print("\n\n".join(report))
def main():
    """Parse CLI options, run every variation of the base command and report."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--base-cmd", default=None, type=str, required=True, help="Base cmd")
    parser.add_argument(
        "--variations",
        default=None,
        type=str,
        nargs="+",
        required=True,
        help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'",
    )
    parser.add_argument(
        "--base-variation",
        default=None,
        type=str,
        help="Baseline variation to compare to. if None the minimal target value will be used to compare against",
    )
    parser.add_argument(
        "--target-metric-key",
        default=None,
        type=str,
        required=True,
        help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second",
    )
    parser.add_argument(
        "--report-metric-keys",
        default="",
        type=str,
        help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples",
    )
    parser.add_argument(
        "--repeat-times",
        default=1,
        type=int,
        help="How many times to re-run each variation - an average will be reported",
    )
    parser.add_argument(
        "--output_dir",
        default="output_benchmark",
        type=str,
        help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked",
    )
    parser.add_argument(
        "--verbose",
        default=False,
        action="store_true",
        help="Whether to show the outputs of each run or just the benchmark progress",
    )
    args = parser.parse_args()

    output_dir = args.output_dir
    Path(output_dir).mkdir(exist_ok=True)
    base_cmd = get_base_command(args, output_dir)

    # split each dimension into its --foo variations
    dims = [list(map(str.strip, re.split(r"\|", x))) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
    longest_variation_len = max(len(x) for x in variations)

    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()

    # capture prints into a log file for convenience
    report_fn = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.txt"
    print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt")
    print(f"and this script's output is also piped into {report_fn}")

    sys.stdout = Tee(report_fn)

    print(f"\n*** Running {len(variations)} benchmarks:")
    print(f"Base command: {' '.join(base_cmd)}")

    variation_key = "variation"
    results = []
    for id, variation in enumerate(tqdm(variations, desc="Total completion: ", leave=False)):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1,
                cmd,
                variation_key,
                variation,
                longest_variation_len,
                args.target_metric_key,
                report_metric_keys,
                args.repeat_times,
                output_dir,
                args.verbose,
            )
        )

    process_results(results, args.target_metric_key, report_metric_keys, args.base_variation, output_dir)


if __name__ == "__main__":
    main()
| 461 |
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
# Module-level logger (transformers' logging wrapper); currently unused below.
UpperCamelCase_ : List[str] = logging.get_logger(__name__)
class FlavaFeatureExtractor(FlavaImageProcessor):
    """Deprecated alias of :class:`FlavaImageProcessor`.

    Kept for backward compatibility only; constructing it emits a
    ``FutureWarning`` pointing users to ``FlavaImageProcessor``.
    """

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use FlavaImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 461 | 1 |
def valid_connection(graph, next_ver, curr_ind, path):
    """Return True if *next_ver* can extend *path* at position *curr_ind*.

    The candidate must be adjacent (in the adjacency matrix *graph*) to the
    previous vertex on the path and must not already appear on the path.
    """
    # 1. Validate that path exists between current and next vertices
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False
    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)


def util_hamilton_cycle(graph, path, curr_ind):
    """Backtracking helper: try to fill *path* in place from *curr_ind* on."""
    # Base Case: all vertices placed
    if curr_ind == len(graph):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1

    # Recursive Step
    for next_ver in range(0, len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False


def UpperCAmelCase_(graph, start_index=0):
    """Return a Hamiltonian cycle of *graph* starting/ending at *start_index*.

    Returns the vertex list (length ``len(graph) + 1``, first == last) or an
    empty list when no Hamiltonian cycle exists.
    """
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate and if we find answer return path, otherwise return empty array
    return path if util_hamilton_cycle(graph, path, 1) else []
| 333 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
    """Builds a tiny UperNet config plus matching random inputs for the tests."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        type_sequence_label_size=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope
        # the common config tests expect this attribute name — presumably
        # mapped from num_stages; TODO confirm against the common test mixin
        self.num_hidden_layers = num_stages

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_backbone_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            num_stages=self.num_stages,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            is_training=self.is_training,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            out_features=self.out_features,
        )

    def get_config(self):
        return UperNetConfig(
            backbone_config=self.get_backbone_config(),
            hidden_size=512,
            pool_scales=[1, 2, 3, 6],
            use_auxiliary_head=True,
            auxiliary_loss_weight=0.4,
            auxiliary_in_channels=40,
            auxiliary_channels=256,
            auxiliary_num_convs=1,
            auxiliary_concat_input=False,
            loss_ignore_index=255,
            num_labels=self.num_labels,
        )

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            pixel_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model-API tests for UperNet.

    Here we also overwrite some of the tests of test_modeling_common.py, as
    UperNet does not use input_ids, inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False

    def setUp(self):
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @unittest.skip(reason="UperNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="UperNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_to_base(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip(reason="UperNet does not have tied weights")
    def test_tied_model_weights_key_ignore(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Download the ADE20k fixture image from the Hub and return it as RGB."""
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg"
    )
    image = Image.open(filepath).convert("RGB")
    return image
@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    """Slow integration tests checking logits against reference checkpoints."""

    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.4_7_9_7, -7.4_7_9_7, -7.3_0_6_8]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.7_7_4_6, -8.7_7_4_6, -8.6_1_3_0]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
| 333 | 1 |
def least_divisible_repunit(divisor: int) -> int:
    """Return A(divisor): the length of the shortest repunit divisible by it.

    Returns 0 when no repunit is divisible, i.e. when divisor shares a factor
    with 10 (is even or a multiple of 5).
    """
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1  # current repunit value, reduced modulo divisor
    repunit_index = 1
    while repunit:
        # append another '1' digit, staying in modular arithmetic
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def solution(limit: int = 1_00_00_00) -> int:
    """Project Euler 129: least n coprime to 10 with A(n) > limit.

    Since A(n) <= n, the answer must exceed limit, so start just above it and
    step through odd candidates only.
    """
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor


if __name__ == "__main__":
    print(f"{solution() = }")
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class TreeNode:
    """Binary-tree node: a float payload plus optional left/right children."""

    data: float
    left: "TreeNode | None" = None
    right: "TreeNode | None" = None
def UpperCAmelCase__(node: "TreeNode | None") -> bool:
    """Return True iff the tree rooted at *node* is a valid binary search tree.

    Raises:
        ValueError: if any node is not a TreeNode or its data is not
            convertible to float.
    """

    # Validation
    def is_valid_tree(node: "TreeNode | None") -> bool:
        """Check every node is a TreeNode whose data converts to float."""
        if node is None:
            return True

        if not isinstance(node, TreeNode):
            return False

        try:
            float(node.data)
        except (TypeError, ValueError):
            return False

        return is_valid_tree(node.left) and is_valid_tree(node.right)

    if not is_valid_tree(node):
        raise ValueError(
            'Each node should be type of TreeNode and data should be float.'
        )

    def is_binary_search_tree_recursive_check(
        node: "TreeNode | None", left_bound: float, right_bound: float
    ) -> bool:
        """Check the BST ordering invariant within an open (left, right) bound."""
        if node is None:
            return True

        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(node.right, node.data, right_bound)
        )

    return is_binary_search_tree_recursive_check(node, -float("inf"), float("inf"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 576 | 1 |
"""simple docstring"""
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
_lowerCAmelCase = "src/transformers"
_lowerCAmelCase = "docs/source/en/tasks"
def _find_text_in_file(filename, start_prompt, end_prompt):
    """Extract the text between *start_prompt* and *end_prompt* in a file.

    Returns a 4-tuple ``(text, start_index, end_index, lines)`` where the
    indices delimit the (blank-line trimmed) extracted region inside *lines*,
    so callers can splice a replacement back in.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    # Trim blank lines at both ends of the extracted region.
    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

# Maps each task guide doc to the auto-mapping of model types it should list.
TASK_GUIDE_TO_MODELS = {
    "asr.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
    "audio_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
    "language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
    "image_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
    "masked_language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
    "multiple_choice.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
    "object_detection.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
    "question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
    "semantic_segmentation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
    "sequence_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
    "summarization.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    "token_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
    "translation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    "video_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
    "document_question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
    "monocular_depth_estimation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}

# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
    "summarization.md": ("nllb",),
    "translation.md": ("nllb",),
}
def get_model_list_for_task(task_guide):
    """Return the markdown list of model links that belong in *task_guide*."""
    model_maping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_maping_names or code in special_model_types)
    }
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"
def check_model_list_for_task(task_guide, overwrite=False):
    """Verify (or fix, with ``overwrite=True``) the model list in a task guide.

    Raises:
        ValueError: when the generated list differs from the doc and
            *overwrite* is False.
    """
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->",
        end_prompt="<!--End of the generated tip-->",
    )

    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this."
            )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
_lowerCAmelCase = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 480 |
"""simple docstring"""
_lowerCAmelCase = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
_lowerCAmelCase = ["a", "b", "c", "d", "e"]
def topological_sort(start, visited, sort):
    """Depth-first topological sort over the module-level `edges`/`vertices`.

    Appends each vertex to *sort* only after all of its neighbors; unvisited
    vertices unreachable from *start* are picked up afterwards.  Mutates
    *visited* and returns the completed *sort* list (reverse topological order).
    """
    current = start
    # add current to visited
    visited.append(start)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort
if __name__ == "__main__":
_lowerCAmelCase = topological_sort("a", [], [])
print(sort)
| 480 | 1 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
# Make conversion progress visible on the console.
logging.set_verbosity_info()
# Module logger (obfuscated name; nothing below reads it in this chunk).
lowercase__ = logging.get_logger(__name__)
# (old_prefix, new_prefix) pairs applied, in order, to every checkpoint key.
# Bug fix: both lists below were bound to the same obfuscated name
# `lowercase__`, while the conversion functions read `rename_keys_prefix`
# and `ACCEPTABLE_CHECKPOINTS` — restore the names the code actually uses.
rename_keys_prefix = [
    ("bert.bert", "visual_bert"),
    ("bert.cls", "cls"),
    ("bert.classifier", "cls"),
    ("token_type_embeddings_visual", "visual_token_type_embeddings"),
    ("position_embeddings_visual", "visual_position_embeddings"),
    ("projection", "visual_projection"),
]

# File names of the original VisualBERT checkpoints this script can convert.
ACCEPTABLE_CHECKPOINTS = [
    "nlvr2_coco_pre_trained.th",
    "nlvr2_fine_tuned.th",
    "nlvr2_pre_trained.th",
    "vcr_coco_pre_train.th",
    "vcr_fine_tune.th",
    "vcr_pre_train.th",
    "vqa_coco_pre_trained.th",
    "vqa_fine_tuned.th",
    "vqa_pre_trained.th",
]
def __snake_case ( lowercase : Tuple ):
snake_case_ = torch.load(lowercase , map_location="cpu" )
return sd
def __snake_case(d, config, rename_keys_prefix=None):
    """Rename the keys of a raw VisualBERT state dict ``d`` to the HF layout.

    Args:
        d: original (torch-loaded) state dict.
        config: object exposing ``max_position_embeddings``, used to create
            the position-id buffer the HF model expects.
        rename_keys_prefix: list of ``(old, new)`` substring pairs; defaults
            to the module-level table (duplicated here so this function also
            works when that global is unavailable).

    Returns:
        OrderedDict with detector weights dropped and keys renamed.

    Bug fixes: the original signature declared three parameters all named
    ``lowercase`` (a SyntaxError: duplicate argument), and every result was
    bound to the throwaway local ``snake_case_`` instead of ``new_d[...]``.
    """
    if rename_keys_prefix is None:
        rename_keys_prefix = [
            ("bert.bert", "visual_bert"),
            ("bert.cls", "cls"),
            ("bert.classifier", "cls"),
            ("token_type_embeddings_visual", "visual_token_type_embeddings"),
            ("position_embeddings_visual", "visual_position_embeddings"),
            ("projection", "visual_projection"),
        ]
    new_d = OrderedDict()
    # HF models register position_ids as a buffer; materialize it explicitly.
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
@torch.no_grad()
# NOTE(review): obfuscation damage — both parameters are named `lowercase`
# (a SyntaxError: duplicate argument), the body reads `checkpoint_path`
# (the presumable original first-parameter name — TODO confirm), and all
# intermediate results are bound to the throwaway local `snake_case_`, so
# `model_type`, `config`, `state_dict` etc. are undefined where read below.
def __snake_case ( lowercase : str , lowercase : Optional[Any] ):
    """Convert an original VisualBERT research checkpoint into a HF model dir."""
    # Refuse checkpoints this converter was never validated against.
    assert (
        checkpoint_path.split("/" )[-1] in ACCEPTABLE_CHECKPOINTS
    ), f'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'''
    # Get Config
    if "pre" in checkpoint_path:
        # Pre-training checkpoints map onto VisualBertForPreTraining.
        snake_case_ = "pretraining"
        if "vcr" in checkpoint_path:
            snake_case_ = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            snake_case_ = {"visual_embedding_dim": 2_048}
        elif "vqa" in checkpoint_path:
            snake_case_ = {"visual_embedding_dim": 2_048}
        elif "nlvr" in checkpoint_path:
            snake_case_ = {"visual_embedding_dim": 1_024}
        else:
            raise NotImplementedError(f'''No implementation found for `{checkpoint_path}`.''' )
    else:
        # Fine-tuned checkpoints map onto task-specific model heads; the
        # visual embedding width (and label count) depends on the task.
        if "vcr" in checkpoint_path:
            snake_case_ = {"visual_embedding_dim": 512}
            snake_case_ = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            snake_case_ = {"visual_embedding_dim": 2_048}
            snake_case_ = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            snake_case_ = {"visual_embedding_dim": 2_048, "num_labels": 3_129}
            snake_case_ = "vqa"
        elif "nlvr" in checkpoint_path:
            snake_case_ = {
                "visual_embedding_dim": 1_024,
                "num_labels": 2,
            }
            snake_case_ = "nlvr"
    snake_case_ = VisualBertConfig(**lowercase )
    # Load State Dict
    # NOTE(review): `load_state_dict` and `get_new_dict` are not defined under
    # those names in this file — the helpers above are all obfuscated to
    # `__snake_case` — so these calls raise NameError as written.
    snake_case_ = load_state_dict(lowercase )
    snake_case_ = get_new_dict(lowercase , lowercase )
    if model_type == "pretraining":
        snake_case_ = VisualBertForPreTraining(lowercase )
    elif model_type == "vqa":
        snake_case_ = VisualBertForQuestionAnswering(lowercase )
    elif model_type == "nlvr":
        snake_case_ = VisualBertForVisualReasoning(lowercase )
    elif model_type == "multichoice":
        snake_case_ = VisualBertForMultipleChoice(lowercase )
    model.load_state_dict(lowercase )
    # Save Checkpoints
    Path(lowercase ).mkdir(exist_ok=lowercase )
    model.save_pretrained(lowercase )
if __name__ == "__main__":
    # Bug fix: the parser and parsed args were bound to `lowercase__` while
    # the code below read the undefined names `parser` / `args`, and the
    # converter was called via the undefined `convert_visual_bert_checkpoint`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    # `__snake_case` resolves to the last definition above — the
    # @torch.no_grad() checkpoint converter.
    __snake_case(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 508 |
'''simple docstring'''
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
# NOTE(review): this archive map is rebound to the same obfuscated name as the
# logger above, so the logger binding is lost immediately. TODO: restore
# distinct names (e.g. `logger` and `DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP`).
lowercase__ = {
    '''facebook/data2vec-base-960h''': '''https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json''',
    # See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class UpperCAmelCase_ ( SCREAMING_SNAKE_CASE__ ):
    """Configuration for a Data2Vec audio model.

    NOTE(review): obfuscation damage in this block —
    * the base class is the undefined name `SCREAMING_SNAKE_CASE__`
      (presumably `PretrainedConfig` — TODO confirm);
    * every `__init__` parameter is named `UpperCAmelCase_`, which is a
      SyntaxError (duplicate argument name);
    * every attribute assignment goes to the throwaway local `snake_case_`
      instead of `self.<attr>`, so the later reads of `self.conv_dim`,
      `self.conv_stride`, `self.conv_kernel` below would fail.
    The right-hand sides of the assignments still document the intended
    parameter list and its order.
    """

    # Model identifier used by the auto-config machinery.
    snake_case = """data2vec-audio"""

    def __init__( self , UpperCAmelCase_=32 , UpperCAmelCase_=7_68 , UpperCAmelCase_=12 , UpperCAmelCase_=12 , UpperCAmelCase_=30_72 , UpperCAmelCase_="gelu" , UpperCAmelCase_=0.1 , UpperCAmelCase_=0.1 , UpperCAmelCase_=0.1 , UpperCAmelCase_=0.0 , UpperCAmelCase_=0.1 , UpperCAmelCase_=0.1 , UpperCAmelCase_=0.02 , UpperCAmelCase_=1e-5 , UpperCAmelCase_="gelu" , UpperCAmelCase_=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , UpperCAmelCase_=(5, 2, 2, 2, 2, 2, 2) , UpperCAmelCase_=(10, 3, 3, 3, 3, 2, 2) , UpperCAmelCase_=False , UpperCAmelCase_=16 , UpperCAmelCase_=19 , UpperCAmelCase_=5 , UpperCAmelCase_=0.05 , UpperCAmelCase_=10 , UpperCAmelCase_=2 , UpperCAmelCase_=0.0 , UpperCAmelCase_=10 , UpperCAmelCase_=0 , UpperCAmelCase_="sum" , UpperCAmelCase_=False , UpperCAmelCase_=False , UpperCAmelCase_=2_56 , UpperCAmelCase_=(5_12, 5_12, 5_12, 5_12, 15_00) , UpperCAmelCase_=(5, 3, 3, 1, 1) , UpperCAmelCase_=(1, 2, 3, 1, 1) , UpperCAmelCase_=5_12 , UpperCAmelCase_=0 , UpperCAmelCase_=1 , UpperCAmelCase_=2 , UpperCAmelCase_=False , UpperCAmelCase_=3 , UpperCAmelCase_=2 , UpperCAmelCase_=3 , UpperCAmelCase_=None , **UpperCAmelCase_ , ):
        super().__init__(**UpperCAmelCase_ , pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ )
        # Core transformer / feature-extractor hyper-parameters.
        snake_case_ = hidden_size
        snake_case_ = feat_extract_activation
        snake_case_ = list(UpperCAmelCase_ )
        snake_case_ = list(UpperCAmelCase_ )
        snake_case_ = list(UpperCAmelCase_ )
        snake_case_ = conv_bias
        snake_case_ = num_conv_pos_embeddings
        snake_case_ = num_conv_pos_embedding_groups
        snake_case_ = conv_pos_kernel_size
        snake_case_ = len(self.conv_dim )
        snake_case_ = num_hidden_layers
        snake_case_ = intermediate_size
        snake_case_ = hidden_act
        snake_case_ = num_attention_heads
        snake_case_ = hidden_dropout
        snake_case_ = attention_dropout
        snake_case_ = activation_dropout
        snake_case_ = feat_proj_dropout
        snake_case_ = final_dropout
        snake_case_ = layerdrop
        snake_case_ = layer_norm_eps
        snake_case_ = initializer_range
        snake_case_ = vocab_size
        snake_case_ = use_weighted_layer_sum
        # The three conv lists must describe the same number of layers.
        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
                f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        snake_case_ = mask_time_prob
        snake_case_ = mask_time_length
        snake_case_ = mask_time_min_masks
        snake_case_ = mask_feature_prob
        snake_case_ = mask_feature_length
        snake_case_ = mask_feature_min_masks
        # ctc loss
        snake_case_ = ctc_loss_reduction
        snake_case_ = ctc_zero_infinity
        # adapter
        snake_case_ = add_adapter
        snake_case_ = adapter_kernel_size
        snake_case_ = adapter_stride
        snake_case_ = num_adapter_layers
        snake_case_ = output_hidden_size or hidden_size
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        snake_case_ = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        snake_case_ = list(UpperCAmelCase_ )
        snake_case_ = list(UpperCAmelCase_ )
        snake_case_ = list(UpperCAmelCase_ )
        snake_case_ = xvector_output_dim

    @property
    def _lowercase ( self ):
        # Total downsampling factor of the convolutional feature extractor.
        return math.prod(self.conv_stride )
| 508 | 1 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Bug fix: the logger was bound to `lowercase` and immediately shadowed by
# the archive map (also bound to `lowercase`), while the config classes below
# call `logger.warning(...)` / `logger.info(...)` — restore `logger`.
logger = logging.get_logger(__name__)

# Canonical config URL for each released Pix2Struct checkpoint.
PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pix2struct-textcaps-base": (
        "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
    ),
}
class A(__snake_case):
    """Configuration for the text (decoder) side of a Pix2Struct model.

    NOTE(review): the base class is obfuscated as `__snake_case` in this file
    (presumably `PretrainedConfig` — confirm before relying on inherited
    behavior). Fixes applied: the three class attributes were all bound to
    `__magic_name__` (each shadowing the previous), every `__init__`
    parameter was named `SCREAMING_SNAKE_CASE` (a SyntaxError: duplicate
    argument), and attribute assignments went to the throwaway local `A`.
    """

    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Map the standard config attribute names onto this model's names.
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=50244,
        hidden_size=768,
        d_kv=64,
        d_ff=2048,
        num_layers=12,
        num_heads=12,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        dense_act_fn="gelu_new",
        decoder_start_token_id=0,
        use_cache=False,
        pad_token_id=0,
        eos_token_id=1,
        tie_word_embeddings=False,
        is_decoder=True,
        **kwargs,
    ):
        """All defaults correspond to the released pix2struct-base values."""
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache
        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id
        # for backwards compatibility
        self.dense_act_fn = dense_act_fn
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings,
            is_decoder=is_decoder,
            **kwargs,
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        """Load this config, unwrapping `text_config` when given a composite
        Pix2Struct config."""
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.'
            )
        return cls.from_dict(config_dict, **kwargs)
class A(__snake_case):
    """Configuration for the Pix2Struct vision encoder.

    NOTE(review): the base class is obfuscated as `__snake_case`
    (presumably `PretrainedConfig`). Fixes applied: duplicate
    `SCREAMING_SNAKE_CASE` parameter names (a SyntaxError) and attribute
    assignments lost to the throwaway local `A`.
    """

    model_type = "pix2struct_vision_model"

    def __init__(
        self,
        hidden_size=768,
        patch_embed_hidden_size=768,
        d_ff=2048,
        d_kv=64,
        num_hidden_layers=12,
        num_attention_heads=12,
        dense_act_fn="gelu_new",
        layer_norm_eps=1e-6,
        dropout_rate=0.0,
        attention_dropout=0.0,
        initializer_range=1e-10,
        initializer_factor=1.0,
        seq_len=4096,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        """Load this config, unwrapping `vision_config` when given a composite
        Pix2Struct config."""
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.'
            )
        return cls.from_dict(config_dict, **kwargs)
class A(__snake_case):
    """Composite Pix2Struct configuration (text decoder + vision encoder).

    NOTE(review): `PixaStructTextConfig` / `PixaStructVisionConfig` are not
    defined under those names in this file — the sub-config classes above are
    all obfuscated to `A` — confirm the intended class names before use.
    Fixes applied: duplicate `SCREAMING_SNAKE_CASE` parameter names
    (a SyntaxError), lost attribute assignments, and two methods sharing the
    obfuscated name `__lowerCAmelCase` (the second shadowed the first).
    """

    model_type = "pix2struct"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        initializer_factor=1.0,
        initializer_range=0.02,
        is_vqa=False,
        tie_word_embeddings=False,
        is_encoder_decoder=True,
        **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)
        if text_config is None:
            text_config = {}
            logger.info('''text_config is None. Initializing the Pix2StructTextConfig with default values.''')
        if vision_config is None:
            vision_config = {}
            logger.info('''vision_config is None. Initializing the Pix2StructVisionConfig with default values.''')
        self.text_config = PixaStructTextConfig(**text_config)
        self.vision_config = PixaStructVisionConfig(**vision_config)
        # Token ids are owned by the text (decoder) sub-config.
        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id
        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range
        # Propagate the shared initializer range into both sub-configs.
        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range
        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        """Build a composite config from already-instantiated sub-configs."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 343 |
'''simple docstring'''
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class A(__snake_case, unittest.TestCase):
    """Tokenization tests for CPM-Ant (jieba-based pre-tokenization).

    NOTE(review): the first base class is obfuscated as `__snake_case`
    (presumably `TokenizerTesterMixin`). Fixes applied: the two class
    attributes were both bound to `__magic_name__` (the second shadowing the
    first) and both methods were named `__lowerCAmelCase`, so `setUp` was
    never invoked by unittest and the test method shadowed it — restored to
    the names the unittest/mixin machinery actually calls.
    """

    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        # Minimal vocabulary: special tokens plus the characters used below.
        vocab_tokens = [
            "<d>",
            "</d>",
            "<s>",
            "</s>",
            "</_>",
            "<unk>",
            "<pad>",
            "</n>",
            "我",
            "是",
            "C",
            "P",
            "M",
            "A",
            "n",
            "t",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    @tooslow
    def test_pre_tokenization(self):
        """Tokenize / ids / decode round-trip against the released checkpoint."""
        tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
        texts = "今天天气真好!"
        jieba_tokens = ["今天", "天气", "真", "好", "!"]
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, jieba_tokens)
        normalized_text = "今天天气真好!"
        input_tokens = [tokenizer.bos_token] + tokens
        input_ids = [6, 9802, 14962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_ids)
        reconstructed_text = tokenizer.decode(input_ids)
        self.assertEqual(reconstructed_text, normalized_text)
| 343 | 1 |
'''simple docstring'''
def _a (lowercase__ : int = 3 , lowercase__ : int = 7 , lowercase__ : int = 1_0_0_0_0_0_0 ) -> int:
"""simple docstring"""
__snake_case = 0
__snake_case = 1
for current_denominator in range(1 , limit + 1 ):
__snake_case = current_denominator * numerator // denominator
if current_denominator % denominator == 0:
current_numerator -= 1
if current_numerator * max_denominator > current_denominator * max_numerator:
__snake_case = current_numerator
__snake_case = current_denominator
return max_numerator
if __name__ == "__main__":
    # Bug fix: the original called the undefined name `solution`; the solver
    # above is (obfuscated as) `_a`. Call positionally to stay robust.
    print(_a(3, 7, 1_000_000))
| 56 |
from ... import PretrainedConfig
# Bug fix: this dict was bound to the obfuscated name `__A`, while the config
# class below reads `NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP` at class-creation
# time (NameError) — restore the name the code actually uses.
NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}
class lowerCamelCase(__snake_case):
    """NeZha model configuration (vocabulary, layer sizes, dropout rates and
    relative-position settings).

    NOTE(review): the base class is obfuscated as `__snake_case` (presumably
    `PretrainedConfig`). Fixes applied: the two class attributes were both
    bound to `__magic_name__` (the second shadowing the first), every
    `__init__` parameter was named `snake_case_` (a SyntaxError: duplicate
    argument), and attribute assignments went to the throwaway local `_A`.
    """

    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "nezha"

    def __init__(
        self,
        vocab_size=21_128,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        max_relative_position=64,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout=0.1,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
| 27 | 0 |
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class SCREAMING_SNAKE_CASE__ :
    """A batched, differentiable pinhole camera rig: per-view origin plus
    orthonormal x/y/z axes, image resolution and field of view.

    NOTE(review): obfuscation damage — all nine dataclass fields below were
    renamed to `__SCREAMING_SNAKE_CASE` (per the method bodies the intended
    fields are: origin, x, y, z, width, height, x_fov, y_fov, shape — TODO
    confirm), and every method is named `UpperCamelCase`, so each later
    definition shadows the previous one. Method bodies below document the
    intended behavior only.
    """

    __SCREAMING_SNAKE_CASE = 42  # [batch_size x 3]
    __SCREAMING_SNAKE_CASE = 42  # [batch_size x 3]
    __SCREAMING_SNAKE_CASE = 42  # [batch_size x 3]
    __SCREAMING_SNAKE_CASE = 42  # [batch_size x 3]
    __SCREAMING_SNAKE_CASE = 42
    __SCREAMING_SNAKE_CASE = 42
    __SCREAMING_SNAKE_CASE = 42
    __SCREAMING_SNAKE_CASE = 42
    __SCREAMING_SNAKE_CASE = 42

    def UpperCamelCase ( self ):
        # Shape-consistency check: origin and the three axes must be
        # [batch_size x 3] with matching batch dimensions.
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2

    def UpperCamelCase ( self ):
        # Image resolution as a float32 tensor [width, height].
        return torch.from_numpy(np.array([self.width, self.height],dtype=np.floataa ) )

    def UpperCamelCase ( self ):
        # Field of view as a float32 tensor [x_fov, y_fov].
        return torch.from_numpy(np.array([self.x_fov, self.y_fov],dtype=np.floataa ) )

    def UpperCamelCase ( self ):
        # Pixel-center coordinates for every pixel, as (col, row) pairs of
        # shape [height*width, 2] (row = linear_index // width).
        A__ = torch.arange(self.height * self.width )
        A__ = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(__lowerCamelCase,self.width,rounding_mode='''trunc''' ),
            ],axis=1,)
        return coords

    @property
    def UpperCamelCase ( self ):
        # One ray (origin, direction) per pixel per view, flattened to
        # [batch, inner_batch * height * width, 2, 3].
        A__ = self.shape
        A__ = int(np.prod(__lowerCamelCase ) )
        A__ = self.get_image_coords()
        A__ = torch.broadcast_to(coords.unsqueeze(0 ),[batch_size * inner_batch_size, *coords.shape] )
        A__ = self.get_camera_rays(__lowerCamelCase )
        A__ = rays.view(__lowerCamelCase,inner_batch_size * self.height * self.width,2,3 )
        return rays

    def UpperCamelCase ( self,__lowerCamelCase ):
        # Convert pixel coordinates into world-space rays: map pixels to
        # [-1, 1], scale by tan(fov/2), then combine the camera axes.
        A__ = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]
        A__ = coords.view(__lowerCamelCase,-1,2 )
        A__ = self.resolution()
        A__ = self.fov()
        A__ = (flat.float() / (res - 1)) * 2 - 1
        A__ = fracs * torch.tan(fov / 2 )
        A__ = fracs.view(__lowerCamelCase,-1,2 )
        A__ = (
            self.z.view(__lowerCamelCase,1,3 )
            + self.x.view(__lowerCamelCase,1,3 ) * fracs[:, :, :1]
            + self.y.view(__lowerCamelCase,1,3 ) * fracs[:, :, 1:]
        )
        # Normalize directions to unit length (keepdim presumably True —
        # the flag was obfuscated to `__lowerCamelCase`; TODO confirm).
        A__ = directions / directions.norm(dim=-1,keepdim=__lowerCamelCase )
        A__ = torch.stack(
            [
                torch.broadcast_to(self.origin.view(__lowerCamelCase,1,3 ),[batch_size, directions.shape[1], 3] ),
                directions,
            ],dim=2,)
        return rays.view(__lowerCamelCase,*__lowerCamelCase,2,3 )

    def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase ):
        # Return a copy of this camera at a new resolution; the aspect ratio
        # must be preserved. NOTE(review): the parameters were obfuscated —
        # the assert reads the undefined names `width`/`height`.
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin,x=self.x,y=self.y,z=self.z,width=__lowerCamelCase,height=__lowerCamelCase,x_fov=self.x_fov,y_fov=self.y_fov,)
def UpperCamelCase__(size: int):
    """Create a ring of 20 cameras panning 360 degrees around the origin.

    Each camera sits at radius 4, looks at the origin with a slight downward
    tilt (z has a -0.5 vertical component before normalization), and renders
    a ``size`` x ``size`` image with a 0.7 rad field of view.

    Bug fixes: the original parameter was named ``UpperCamelCase__`` (it
    shadowed the function itself) and the loop body read the undefined name
    ``__lowercase`` where the loop variable / axes were intended.

    NOTE(review): the returned `DifferentiableProjectiveCamera` is not defined
    under that name in this file (the camera class above is obfuscated to
    `SCREAMING_SNAKE_CASE__`) — confirm the intended class name.
    """
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0, 2 * np.pi, num=20):
        # Viewing direction: around the circle, tilted down, normalized.
        z = np.array([np.sin(theta), np.cos(theta), -0.5])
        z /= np.sqrt(np.sum(z**2))
        # Camera position: back along the viewing direction at radius 4.
        origin = -z * 4
        # Horizontal image axis, then vertical axis via the cross product.
        x = np.array([np.cos(theta), -np.sin(theta), 0.0])
        y = np.cross(z, x)
        origins.append(origin)
        xs.append(x)
        ys.append(y)
        zs.append(z)
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins, axis=0)).float(),
        x=torch.from_numpy(np.stack(xs, axis=0)).float(),
        y=torch.from_numpy(np.stack(ys, axis=0)).float(),
        z=torch.from_numpy(np.stack(zs, axis=0)).float(),
        width=size,
        height=size,
        x_fov=0.7,
        y_fov=0.7,
        shape=(1, len(xs)),
    )
| 704 |
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SCREAMING_SNAKE_CASE__ :
    """Builds tiny ViT configs and random inputs for the model tests below and
    runs the per-head forward checks.

    NOTE(review): obfuscation damage — every method parameter is named
    `__lowerCamelCase` (duplicate argument names in `__init__` and the
    create_and_check methods are a SyntaxError), and every assignment goes to
    the throwaway local `A__` instead of `self.<attr>` / a distinct local, so
    the later reads of `self.batch_size`, `config`, `result`, ... are
    undefined as written. The right-hand sides still document the intended
    values and flow.
    """

    def __init__( self,__lowerCamelCase,__lowerCamelCase=13,__lowerCamelCase=30,__lowerCamelCase=2,__lowerCamelCase=3,__lowerCamelCase=True,__lowerCamelCase=True,__lowerCamelCase=32,__lowerCamelCase=5,__lowerCamelCase=4,__lowerCamelCase=37,__lowerCamelCase="gelu",__lowerCamelCase=0.1,__lowerCamelCase=0.1,__lowerCamelCase=10,__lowerCamelCase=0.02,__lowerCamelCase=None,__lowerCamelCase=2,):
        A__ = parent
        A__ = batch_size
        A__ = image_size
        A__ = patch_size
        A__ = num_channels
        A__ = is_training
        A__ = use_labels
        A__ = hidden_size
        A__ = num_hidden_layers
        A__ = num_attention_heads
        A__ = intermediate_size
        A__ = hidden_act
        A__ = hidden_dropout_prob
        A__ = attention_probs_dropout_prob
        A__ = type_sequence_label_size
        A__ = initializer_range
        A__ = scope
        A__ = encoder_stride
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        A__ = (image_size // patch_size) ** 2
        A__ = num_patches + 1

    def UpperCamelCase ( self ):
        # Random pixel inputs (and labels when use_labels is set) plus a config.
        A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        A__ = None
        if self.use_labels:
            A__ = ids_tensor([self.batch_size],self.type_sequence_label_size )
        A__ = self.get_config()
        return config, pixel_values, labels

    def UpperCamelCase ( self ):
        return ViTConfig(
            image_size=self.image_size,patch_size=self.patch_size,num_channels=self.num_channels,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,is_decoder=__lowerCamelCase,initializer_range=self.initializer_range,encoder_stride=self.encoder_stride,)

    def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase ):
        # Base model: last_hidden_state must be [batch, seq_len, hidden].
        A__ = ViTModel(config=__lowerCamelCase )
        model.to(__lowerCamelCase )
        model.eval()
        A__ = model(__lowerCamelCase )
        self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) )

    def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase ):
        # Masked-image-modeling head: reconstruction must match input shape.
        A__ = ViTForMaskedImageModeling(config=__lowerCamelCase )
        model.to(__lowerCamelCase )
        model.eval()
        A__ = model(__lowerCamelCase )
        self.parent.assertEqual(
            result.reconstruction.shape,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
        # test greyscale images
        A__ = 1
        A__ = ViTForMaskedImageModeling(__lowerCamelCase )
        model.to(__lowerCamelCase )
        model.eval()
        A__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        A__ = model(__lowerCamelCase )
        self.parent.assertEqual(result.reconstruction.shape,(self.batch_size, 1, self.image_size, self.image_size) )

    def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase ):
        # Classification head: logits must be [batch, num_labels].
        A__ = self.type_sequence_label_size
        A__ = ViTForImageClassification(__lowerCamelCase )
        model.to(__lowerCamelCase )
        model.eval()
        A__ = model(__lowerCamelCase,labels=__lowerCamelCase )
        self.parent.assertEqual(result.logits.shape,(self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        A__ = 1
        A__ = ViTForImageClassification(__lowerCamelCase )
        model.to(__lowerCamelCase )
        model.eval()
        A__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        A__ = model(__lowerCamelCase )
        self.parent.assertEqual(result.logits.shape,(self.batch_size, self.type_sequence_label_size) )

    def UpperCamelCase ( self ):
        # Repackage prepare_config_and_inputs() for the common test mixin.
        A__ = self.prepare_config_and_inputs()
        (
            (
                A__
            ) , (
                A__
            ) , (
                A__
            ) ,
        ) = config_and_inputs
        A__ = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
    """Common-API test suite for ViT models (ModelTesterMixin + PipelineTesterMixin).

    NOTE(review): obfuscation damage — the two mixin base classes are the
    undefined name `UpperCamelCase__` (defined only *after* this class, as a
    function), the class attributes below were all renamed to
    `__SCREAMING_SNAKE_CASE` (each shadowing the previous; originally
    presumably all_model_classes / pipeline_model_mapping /
    test_pruning-style flags — TODO confirm), and locals are bound to the
    throwaway `A__`.
    """

    __SCREAMING_SNAKE_CASE = (
        (
            ViTModel,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    __SCREAMING_SNAKE_CASE = (
        {'''feature-extraction''': ViTModel, '''image-classification''': ViTForImageClassification}
        if is_torch_available()
        else {}
    )
    __SCREAMING_SNAKE_CASE = True
    __SCREAMING_SNAKE_CASE = False
    __SCREAMING_SNAKE_CASE = False
    __SCREAMING_SNAKE_CASE = False

    def UpperCamelCase ( self ):
        # setUp: build the model tester and a config tester for ViTConfig.
        A__ = ViTModelTester(self )
        A__ = ConfigTester(self,config_class=__lowerCamelCase,has_text_modality=__lowerCamelCase,hidden_size=37 )

    def UpperCamelCase ( self ):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='''ViT does not use inputs_embeds''' )
    def UpperCamelCase ( self ):
        pass

    def UpperCamelCase ( self ):
        # Input embeddings are a module; output embeddings are Linear or None.
        A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            A__ = model_class(__lowerCamelCase )
            self.assertIsInstance(model.get_input_embeddings(),(nn.Module) )
            A__ = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(__lowerCamelCase,nn.Linear ) )

    def UpperCamelCase ( self ):
        # The forward signature must start with `pixel_values`.
        A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            A__ = model_class(__lowerCamelCase )
            A__ = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            A__ = [*signature.parameters.keys()]
            A__ = ['''pixel_values''']
            self.assertListEqual(arg_names[:1],__lowerCamelCase )

    def UpperCamelCase ( self ):
        A__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__lowerCamelCase )

    def UpperCamelCase ( self ):
        A__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*__lowerCamelCase )

    def UpperCamelCase ( self ):
        A__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )

    @slow
    def UpperCamelCase ( self ):
        # Smoke-test loading a released checkpoint from the hub.
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            A__ = ViTModel.from_pretrained(__lowerCamelCase )
            self.assertIsNotNone(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def UpperCamelCase__():
    """Load the standard COCO test fixture image used by the slow tests.

    Bug fix: the original bound the opened image to the throwaway local
    ``A__`` and returned the undefined name ``image`` (NameError); the stray
    ``-> int`` annotation was also wrong for a PIL image and was removed.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Slow integration tests: run released ViT checkpoints on a fixture image
    and compare outputs against recorded reference values.

    NOTE(review): obfuscation damage — every method is named `UpperCamelCase`
    (later definitions shadow earlier ones, and the cached property below
    was presumably `default_image_processor` given the reads in the test
    bodies — TODO confirm), locals are bound to the throwaway `A__`, and
    `__lowerCamelCase` is read where distinct values (device, inputs, model,
    expected tensors) were intended.
    """

    @cached_property
    def UpperCamelCase ( self ):
        # Image processor for the released classification checkpoint.
        return ViTImageProcessor.from_pretrained('''google/vit-base-patch16-224''' ) if is_vision_available() else None

    @slow
    def UpperCamelCase ( self ):
        # Classification head: check logits shape and first three values.
        A__ = ViTForImageClassification.from_pretrained('''google/vit-base-patch16-224''' ).to(__lowerCamelCase )
        A__ = self.default_image_processor
        A__ = prepare_img()
        A__ = image_processor(images=__lowerCamelCase,return_tensors='''pt''' ).to(__lowerCamelCase )
        # forward pass
        with torch.no_grad():
            A__ = model(**__lowerCamelCase )
        # verify the logits
        A__ = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape,__lowerCamelCase )
        A__ = torch.tensor([-0.2744, 0.8215, -0.0836] ).to(__lowerCamelCase )
        self.assertTrue(torch.allclose(outputs.logits[0, :3],__lowerCamelCase,atol=1E-4 ) )

    @slow
    def UpperCamelCase ( self ):
        # ViT models have an `interpolate_pos_encoding` argument in their forward method,
        # allowing to interpolate the pre-trained position embeddings in order to use
        # the model on higher resolutions. The DINO model by Facebook AI leverages this
        # to visualize self-attention on higher resolution images.
        A__ = ViTModel.from_pretrained('''facebook/dino-vits8''' ).to(__lowerCamelCase )
        A__ = ViTImageProcessor.from_pretrained('''facebook/dino-vits8''',size=480 )
        A__ = prepare_img()
        A__ = image_processor(images=__lowerCamelCase,return_tensors='''pt''' )
        A__ = inputs.pixel_values.to(__lowerCamelCase )
        # forward pass
        with torch.no_grad():
            A__ = model(__lowerCamelCase,interpolate_pos_encoding=__lowerCamelCase )
        # verify the logits
        A__ = torch.Size((1, 3601, 384) )
        self.assertEqual(outputs.last_hidden_state.shape,__lowerCamelCase )
        A__ = torch.tensor(
            [[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]] ).to(__lowerCamelCase )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3],__lowerCamelCase,atol=1E-4 ) )

    @slow
    @require_accelerate
    @require_torch_gpu
    def UpperCamelCase ( self ):
        # fp16 smoke test: inference must not raise when the model is loaded
        # in half precision with `device_map="auto"`.
        A__ = ViTModel.from_pretrained('''facebook/dino-vits8''',torch_dtype=torch.floataa,device_map='''auto''' )
        A__ = self.default_image_processor
        A__ = prepare_img()
        A__ = image_processor(images=__lowerCamelCase,return_tensors='''pt''' )
        A__ = inputs.pixel_values.to(__lowerCamelCase )
        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            A__ = model(__lowerCamelCase )
| 212 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class __lowercase(unittest.TestCase):
    """Holds the hyper-parameters used by the Donut image-processor tests and
    builds the matching keyword dict for `DonutImageProcessor`.

    Fixes applied: every `__init__` parameter was named `A` (a SyntaxError:
    duplicate argument), attributes were assigned to the throwaway local
    `UpperCAmelCase__`, and the dict builder was renamed to the class's own
    (obfuscated) name — restored to `prepare_image_processor_dict`, which is
    what the test class below calls.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_thumbnail=True,
        do_align_axis=False,
        do_pad=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],  # noqa: B006 — read-only defaults, never mutated
        image_std=[0.5, 0.5, 0.5],  # noqa: B006
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        # Default target size matches the expectations hard-coded in the tests.
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        """Return the kwargs used to construct a `DonutImageProcessor` under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_thumbnail": self.do_thumbnail,
            "do_align_long_axis": self.do_align_axis,
            "do_pad": self.do_pad,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class __lowercase ( __lowerCamelCase , unittest.TestCase ):
    """Tests for the Donut image processor.

    NOTE(review): this file is name-mangled — the mixin base ``__lowerCamelCase``
    (upstream: ``ImageProcessingSavingTestMixin``) is undefined here, every
    method shares the name ``__lowercase`` (later defs shadow earlier ones),
    and results are bound to throwaway names (``UpperCAmelCase__``) while the
    assertions read the upstream locals (``image_processor``, ``image_inputs``,
    ``encoded_images``, ``A``), which are unbound. Code kept byte-for-byte;
    only comments/docstrings added.
    """

    # Processor class under test; None when vision dependencies are missing.
    snake_case_ = DonutImageProcessor if is_vision_available() else None

    def __lowercase ( self : str ):
        '''setUp: build the shared tester fixture (upstream binds it to
        ``self.image_processor_tester``; the mangled code discards it).'''
        UpperCAmelCase__ : Tuple = DonutImageProcessingTester(self )

    @property
    def __lowercase ( self : Dict ):
        '''Processor kwargs dict produced by the tester fixture.'''
        return self.image_processor_tester.prepare_image_processor_dict()

    def __lowercase ( self : Any ):
        '''The processor exposes all expected configuration attributes.'''
        UpperCAmelCase__ : Any = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(A ,"""do_resize""" ) )
        self.assertTrue(hasattr(A ,"""size""" ) )
        self.assertTrue(hasattr(A ,"""do_thumbnail""" ) )
        self.assertTrue(hasattr(A ,"""do_align_long_axis""" ) )
        self.assertTrue(hasattr(A ,"""do_pad""" ) )
        self.assertTrue(hasattr(A ,"""do_normalize""" ) )
        self.assertTrue(hasattr(A ,"""image_mean""" ) )
        self.assertTrue(hasattr(A ,"""image_std""" ) )

    def __lowercase ( self : Optional[Any] ):
        '''`size` can be overridden via from_dict, incl. legacy (w, h) tuples.'''
        UpperCAmelCase__ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size ,{"""height""": 18, """width""": 20} )
        UpperCAmelCase__ : str = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 )
        self.assertEqual(image_processor.size ,{"""height""": 42, """width""": 42} )
        # Previous config had dimensions in (width, height) order
        UpperCAmelCase__ : str = self.image_processing_class.from_dict(self.image_processor_dict ,size=(42, 84) )
        self.assertEqual(image_processor.size ,{"""height""": 84, """width""": 42} )

    def __lowercase ( self : Dict ):
        '''Intentionally empty placeholder (kept from upstream).'''
        pass

    @is_flaky()
    def __lowercase ( self : int ):
        '''Batch / non-batch output shapes for PIL image inputs.'''
        # Initialize image_processing
        UpperCAmelCase__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        UpperCAmelCase__ : Dict = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A )
        for image in image_inputs:
            self.assertIsInstance(A ,Image.Image )
        # Test not batched input
        UpperCAmelCase__ : int = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) ,)
        # Test batched
        UpperCAmelCase__ : Tuple = image_processing(A ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) ,)

    @is_flaky()
    def __lowercase ( self : List[str] ):
        '''Batch / non-batch output shapes for numpy array inputs.'''
        # Initialize image_processing
        UpperCAmelCase__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        UpperCAmelCase__ : Dict = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A ,numpify=A )
        for image in image_inputs:
            self.assertIsInstance(A ,np.ndarray )
        # Test not batched input
        UpperCAmelCase__ : List[str] = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) ,)
        # Test batched
        UpperCAmelCase__ : Optional[int] = image_processing(A ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) ,)

    @is_flaky()
    def __lowercase ( self : Any ):
        '''Batch / non-batch output shapes for torch tensor inputs.'''
        # Initialize image_processing
        UpperCAmelCase__ : int = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        UpperCAmelCase__ : int = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A ,torchify=A )
        for image in image_inputs:
            self.assertIsInstance(A ,torch.Tensor )
        # Test not batched input
        UpperCAmelCase__ : List[Any] = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) ,)
        # Test batched
        UpperCAmelCase__ : List[Any] = image_processing(A ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) ,)
| 65 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
# Help text for the `accelerate config default` subcommand (upstream name:
# `description`). Annotation corrected to `str` — the original `Any` was
# never imported and module-level annotations are evaluated.
lowerCamelCase__ : str = """Create a default config file for Accelerate with only a few flags set."""
def write_basic_config(mixed_precision="no", save_location=default_json_config_file, use_xpu=False):
    """Create and save a minimal Accelerate cluster config file.

    (Renamed from the mangled ``UpperCamelCase`` — the command handler later
    in this file already calls ``write_basic_config``.) The mangled original
    declared three identically named parameters (a SyntaxError) and collapsed
    every local to one name, so the config dict never received its
    ``num_processes``/``use_cpu``/``distributed_type`` entries and
    ``ClusterConfig(**...)`` was applied to a string. Names here are
    reconstructed from the f-strings and the device-count branches.

    Args:
        mixed_precision: one of "no", "fp16", "bf16", "fp8" (case-insensitive).
        save_location: target JSON path; an existing file is never overwritten.
        use_xpu: opt in to the XPU branch when XPUs are available.

    Returns:
        The config ``Path`` on success, or ``False`` if the file already existed.

    Raises:
        ValueError: if ``mixed_precision`` is not a recognized value.
    """
    path = Path(save_location)
    path.parent.mkdir(parents=True, exist_ok=True)
    if path.exists():
        print(
            f'Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.')
        return False
    mixed_precision = mixed_precision.lower()
    if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
        raise ValueError(
            f'`mixed_precision` should be one of \'no\', \'fp16\', \'bf16\', or \'fp8\'. Received {mixed_precision}')
    config = {
        "compute_environment": "LOCAL_MACHINE",
        "mixed_precision": mixed_precision,
    }
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        config["num_processes"] = num_gpus
        config["use_cpu"] = False
        if num_gpus > 1:
            config["distributed_type"] = "MULTI_GPU"
        else:
            config["distributed_type"] = "NO"
    elif is_xpu_available() and use_xpu:
        num_xpus = torch.xpu.device_count()
        config["num_processes"] = num_xpus
        config["use_cpu"] = False
        if num_xpus > 1:
            config["distributed_type"] = "MULTI_XPU"
        else:
            config["distributed_type"] = "NO"
    elif is_npu_available():
        num_npus = torch.npu.device_count()
        config["num_processes"] = num_npus
        config["use_cpu"] = False
        if num_npus > 1:
            config["distributed_type"] = "MULTI_NPU"
        else:
            config["distributed_type"] = "NO"
    else:
        # Pure-CPU fallback: single process.
        num_gpus = 0
        config["use_cpu"] = True
        config["num_processes"] = 1
        config["distributed_type"] = "NO"
    config = ClusterConfig(**config)
    config.to_json_file(path)
    return path
def UpperCamelCase(parser, parents):
    """Register the `default` subcommand on the `accelerate config` parser
    (upstream name: ``default_command_parser``).

    The mangled original declared two identically named parameters (a
    SyntaxError) and used one collapsed name everywhere; parameter names are
    reconstructed from the argparse calls in the body.

    Args:
        parser: the subparsers action to register the subcommand on.
        parents: parent parsers to inherit shared flags from.

    Returns:
        The configured subcommand parser.
    """
    parser = parser.add_parser(
        "default",
        parents=parents,
        help=lowerCamelCase__,  # module-level subcommand description string
        formatter_class=SubcommandHelpFormatter,
    )
    parser.add_argument(
        "--config_file",
        default=default_json_config_file,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
        dest="save_location",
    )
    parser.add_argument(
        "--mixed_precision",
        choices=["no", "fp16", "bf16"],
        type=str,
        help=(
            "Whether or not to use mixed precision training. "
            "Choose between FP16 and BF16 (bfloat16) training. "
            "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later."
        ),
        default="no",
    )
    # At call time the module-level `UpperCamelCase` binding is the config
    # command defined last in this file (upstream: `default_config_command`).
    parser.set_defaults(func=UpperCamelCase)
    return parser
def UpperCamelCase(args):
    """Handler for `accelerate config default`: write the basic config file
    and report where it was saved (upstream name: ``default_config_command``).

    Fix over the mangled original: the result was bound to a throwaway name
    while the `if` read an unbound ``config_file``.

    Args:
        args: parsed CLI namespace with `mixed_precision` and `save_location`.
    """
    # NOTE(review): `write_basic_config` is this module's config writer (its
    # def name was mangled upstream); the original body already called it by
    # this name.
    config_file = write_basic_config(args.mixed_precision, args.save_location)
    if config_file:
        print(f'accelerate configuration saved at {config_file}')
| 12 | 0 |
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
# NOTE(review): later code tests `is_python_no_less_than_3_10`, but this
# mangled binding uses a different name, so that check raises NameError.
_lowerCAmelCase = sys.version_info >= (3, 1_0)
def a__(default=None, metadata=None):
    """Dataclass helper: a list-valued field whose ``default_factory`` yields
    *default* (upstream name: ``list_field``).

    Fixes over the mangled original: both parameters shared the name ``a`` (a
    SyntaxError), the lambda read an unbound ``default``, and the metadata
    argument was passed from the wrong parameter.

    Note the lambda late-binds ``default`` — harmless here, since the
    parameter never changes after the call.

    Args:
        default: value returned by the field's default factory.
        metadata: mapping forwarded to ``dataclasses.field(metadata=...)``.

    Returns:
        A ``dataclasses.Field`` descriptor.
    """
    return field(default_factory=lambda: default, metadata=metadata)
# NOTE(review): mangled fixture dataclasses. Upstream each class had a unique
# name (BasicExample, WithDefaultExample, ...) and annotated fields; here every
# class is named `__UpperCAmelCase` (each definition shadows the previous one)
# and every field collapsed to `__magic_name__`, so only the last assignment in
# each body survives. `A__` and `list_field` are unbound here (the list-field
# helper above was mangled to `a__`). Code kept byte-for-byte; comments only.
@dataclass
class __UpperCAmelCase:
    """Fixture (upstream: BasicExample — foo/bar/baz/flag fields)."""
    __magic_name__ = 42
    __magic_name__ = 42
    __magic_name__ = 42
    __magic_name__ = 42


@dataclass
class __UpperCAmelCase:
    """Fixture (upstream: WithDefaultExample)."""
    __magic_name__ = 42
    __magic_name__ = field(default="""toto""" , metadata={"""help""": """help message"""} )


@dataclass
class __UpperCAmelCase:
    """Fixture (upstream: WithDefaultBoolExample)."""
    __magic_name__ = False
    __magic_name__ = True
    __magic_name__ = None


class __UpperCAmelCase( A__ ):
    """Fixture enum (upstream: BasicEnum — titi/toto)."""
    __magic_name__ = """titi"""
    __magic_name__ = """toto"""


class __UpperCAmelCase( A__ ):
    """Fixture enum (upstream: MixedTypeEnum — titi/toto/fourtytwo)."""
    __magic_name__ = """titi"""
    __magic_name__ = """toto"""
    __magic_name__ = 42


@dataclass
class __UpperCAmelCase:
    """Fixture (upstream: EnumExample)."""
    __magic_name__ = "toto"

    def UpperCAmelCase ( self ):
        """Upstream __post_init__: coerce `foo` to BasicEnum (unbound here)."""
        A_ : int = BasicEnum(self.foo )


@dataclass
class __UpperCAmelCase:
    """Fixture (upstream: MixedTypeEnumExample)."""
    __magic_name__ = "toto"

    def UpperCAmelCase ( self ):
        """Upstream __post_init__: coerce `foo` to MixedTypeEnum (unbound here)."""
        A_ : Optional[Any] = MixedTypeEnum(self.foo )


@dataclass
class __UpperCAmelCase:
    """Fixture (upstream: OptionalExample)."""
    __magic_name__ = None
    __magic_name__ = field(default=A__ , metadata={"""help""": """help message"""} )
    __magic_name__ = None
    __magic_name__ = list_field(default=[] )
    __magic_name__ = list_field(default=[] )


@dataclass
class __UpperCAmelCase:
    """Fixture (upstream: ListExample)."""
    __magic_name__ = list_field(default=[] )
    __magic_name__ = list_field(default=[1, 2, 3] )
    __magic_name__ = list_field(default=["""Hallo""", """Bonjour""", """Hello"""] )
    __magic_name__ = list_field(default=[0.1, 0.2, 0.3] )


@dataclass
class __UpperCAmelCase:
    """Fixture (upstream: RequiredExample — three required fields)."""
    __magic_name__ = field()
    __magic_name__ = field()
    __magic_name__ = field()

    def UpperCAmelCase ( self ):
        """Upstream __post_init__: coerce `required_enum` to BasicEnum."""
        A_ : Optional[Any] = BasicEnum(self.required_enum )


@dataclass
class __UpperCAmelCase:
    """Fixture (upstream: StringLiteralAnnotationExample)."""
    __magic_name__ = 42
    __magic_name__ = field()
    __magic_name__ = None
    __magic_name__ = field(default="""toto""" , metadata={"""help""": """help message"""} )
    __magic_name__ = list_field(default=["""Hallo""", """Bonjour""", """Hello"""] )


if is_python_no_less_than_3_10:
    # NOTE(review): `is_python_no_less_than_3_10` is unbound — the module-level
    # flag above was mangled to `_lowerCAmelCase`.
    @dataclass
    class __UpperCAmelCase:
        """PEP 604 variant of WithDefaultBoolExample."""
        __magic_name__ = False
        __magic_name__ = True
        __magic_name__ = None

    @dataclass
    class __UpperCAmelCase:
        """PEP 604 variant of OptionalExample."""
        __magic_name__ = None
        __magic_name__ = field(default=A__ , metadata={"""help""": """help message"""} )
        __magic_name__ = None
        __magic_name__ = list_field(default=[] )
        __magic_name__ = list_field(default=[] )
class __UpperCAmelCase( unittest.TestCase ):
    """Unit tests for ``HfArgumentParser`` (upstream: HfArgumentParserTest).

    NOTE(review): name-mangled — every test method shares the name
    ``UpperCAmelCase`` (only the last definition survives on the class),
    results are bound to the throwaway ``A_`` while assertions read the
    upstream locals (``parser``, ``expected``, ``args``, ...), and most call
    arguments collapsed to ``__magic_name__``. Code kept byte-for-byte;
    comments/docstrings only.
    """

    def UpperCAmelCase ( self , __magic_name__ , __magic_name__ ):
        """Assert two argparse parsers define equivalent actions.

        NOTE(review): duplicate parameter names are a SyntaxError, and the
        body reads ``a``/``b`` (upstream: ``argparsersEqual(self, a, b)``).
        """
        self.assertEqual(len(a._actions ) , len(b._actions ) )
        for x, y in zip(a._actions , b._actions ):
            A_ : List[str] = {k: v for k, v in vars(__magic_name__ ).items() if k != '''container'''}
            A_ : str = {k: v for k, v in vars(__magic_name__ ).items() if k != '''container'''}
            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get('''choices''' , __magic_name__ ) and yy.get('''choices''' , __magic_name__ ):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx['''type'''](__magic_name__ ) , yy['''type'''](__magic_name__ ) )
                del xx["type"], yy["type"]
            self.assertEqual(__magic_name__ , __magic_name__ )

    def UpperCAmelCase ( self ):
        """Typed required arguments round-trip (upstream: test_basic)."""
        A_ : str = HfArgumentParser(__magic_name__ )
        A_ : Optional[int] = argparse.ArgumentParser()
        expected.add_argument('''--foo''' , type=__magic_name__ , required=__magic_name__ )
        expected.add_argument('''--bar''' , type=__magic_name__ , required=__magic_name__ )
        expected.add_argument('''--baz''' , type=__magic_name__ , required=__magic_name__ )
        expected.add_argument('''--flag''' , type=__magic_name__ , default=__magic_name__ , const=__magic_name__ , nargs='''?''' )
        self.argparsersEqual(__magic_name__ , __magic_name__ )
        A_ : Any = ['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5''']
        # NOTE(review): annotated parenthesized target below is itself a
        # SyntaxError (mangled tuple-unpack of the parsed dataclass).
        ((A_) , ) : Optional[Any] = parser.parse_args_into_dataclasses(__magic_name__ , look_for_args_file=__magic_name__ )
        self.assertFalse(example.flag )

    def UpperCAmelCase ( self ):
        """Defaulted arguments (upstream: test_with_default)."""
        A_ : Dict = HfArgumentParser(__magic_name__ )
        A_ : List[str] = argparse.ArgumentParser()
        expected.add_argument('''--foo''' , default=42 , type=__magic_name__ )
        expected.add_argument('''--baz''' , default='''toto''' , type=__magic_name__ , help='''help message''' )
        self.argparsersEqual(__magic_name__ , __magic_name__ )

    def UpperCAmelCase ( self ):
        """Boolean flags with defaults, incl. the auto `--no_*` negation."""
        A_ : List[str] = argparse.ArgumentParser()
        expected.add_argument('''--foo''' , type=__magic_name__ , default=__magic_name__ , const=__magic_name__ , nargs='''?''' )
        expected.add_argument('''--baz''' , type=__magic_name__ , default=__magic_name__ , const=__magic_name__ , nargs='''?''' )
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument('''--no_baz''' , action='''store_false''' , default=__magic_name__ , dest='''baz''' )
        expected.add_argument('''--opt''' , type=__magic_name__ , default=__magic_name__ )
        A_ : List[Any] = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(__magic_name__ )
        for dataclass_type in dataclass_types:
            A_ : str = HfArgumentParser(__magic_name__ )
            self.argparsersEqual(__magic_name__ , __magic_name__ )
            A_ : str = parser.parse_args([] )
            self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , baz=__magic_name__ , opt=__magic_name__ ) )
            A_ : Optional[int] = parser.parse_args(['''--foo''', '''--no_baz'''] )
            self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , baz=__magic_name__ , opt=__magic_name__ ) )
            A_ : Tuple = parser.parse_args(['''--foo''', '''--baz'''] )
            self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , baz=__magic_name__ , opt=__magic_name__ ) )
            A_ : Any = parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''] )
            self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , baz=__magic_name__ , opt=__magic_name__ ) )
            A_ : Union[str, Any] = parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', '''--opt''', '''False'''] )
            self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , baz=__magic_name__ , opt=__magic_name__ ) )

    def UpperCAmelCase ( self ):
        """Enum-typed argument parsing (upstream: test_with_enum)."""
        A_ : Union[str, Any] = HfArgumentParser(__magic_name__ )
        A_ : List[str] = argparse.ArgumentParser()
        expected.add_argument(
            '''--foo''' , default='''toto''' , choices=['''titi''', '''toto''', 42] , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , )
        self.argparsersEqual(__magic_name__ , __magic_name__ )
        A_ : List[Any] = parser.parse_args([] )
        self.assertEqual(args.foo , '''toto''' )
        A_ : List[str] = parser.parse_args_into_dataclasses([] )[0]
        self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
        A_ : Any = parser.parse_args(['''--foo''', '''titi'''] )
        self.assertEqual(args.foo , '''titi''' )
        A_ : Optional[Any] = parser.parse_args_into_dataclasses(['''--foo''', '''titi'''] )[0]
        self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
        A_ : Optional[Any] = parser.parse_args(['''--foo''', '''42'''] )
        self.assertEqual(args.foo , 42 )
        A_ : List[str] = parser.parse_args_into_dataclasses(['''--foo''', '''42'''] )[0]
        self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )

    def UpperCAmelCase ( self ):
        """Literal-typed argument parsing via a locally defined fixture."""
        @dataclass
        class __UpperCAmelCase:
            """Local fixture (upstream had a Literal["titi","toto",42] field)."""
            __magic_name__ = "toto"
        A_ : Tuple = HfArgumentParser(__magic_name__ )
        A_ : Union[str, Any] = argparse.ArgumentParser()
        expected.add_argument(
            '''--foo''' , default='''toto''' , choices=('''titi''', '''toto''', 42) , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , )
        self.argparsersEqual(__magic_name__ , __magic_name__ )
        A_ : Tuple = parser.parse_args([] )
        self.assertEqual(args.foo , '''toto''' )
        A_ : Any = parser.parse_args(['''--foo''', '''titi'''] )
        self.assertEqual(args.foo , '''titi''' )
        A_ : Tuple = parser.parse_args(['''--foo''', '''42'''] )
        self.assertEqual(args.foo , 42 )

    def UpperCAmelCase ( self ):
        """List-typed fields with list_field defaults (upstream: test_with_list)."""
        A_ : List[Any] = HfArgumentParser(__magic_name__ )
        A_ : Union[str, Any] = argparse.ArgumentParser()
        expected.add_argument('''--foo_int''' , nargs='''+''' , default=[] , type=__magic_name__ )
        expected.add_argument('''--bar_int''' , nargs='''+''' , default=[1, 2, 3] , type=__magic_name__ )
        expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=__magic_name__ )
        expected.add_argument('''--foo_float''' , nargs='''+''' , default=[0.1, 0.2, 0.3] , type=__magic_name__ )
        self.argparsersEqual(__magic_name__ , __magic_name__ )
        A_ : Optional[int] = parser.parse_args([] )
        self.assertEqual(
            __magic_name__ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['''Hallo''', '''Bonjour''', '''Hello'''] , foo_float=[0.1, 0.2, 0.3] ) , )
        A_ : Optional[int] = parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'''.split() )
        self.assertEqual(__magic_name__ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['''a''', '''b''', '''c'''] , foo_float=[0.1, 0.7] ) )

    def UpperCAmelCase ( self ):
        """Optional-typed fields default to None / parse when supplied."""
        A_ : Any = argparse.ArgumentParser()
        expected.add_argument('''--foo''' , default=__magic_name__ , type=__magic_name__ )
        expected.add_argument('''--bar''' , default=__magic_name__ , type=__magic_name__ , help='''help message''' )
        expected.add_argument('''--baz''' , default=__magic_name__ , type=__magic_name__ )
        expected.add_argument('''--ces''' , nargs='''+''' , default=[] , type=__magic_name__ )
        expected.add_argument('''--des''' , nargs='''+''' , default=[] , type=__magic_name__ )
        A_ : int = [OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(__magic_name__ )
        for dataclass_type in dataclass_types:
            A_ : Any = HfArgumentParser(__magic_name__ )
            self.argparsersEqual(__magic_name__ , __magic_name__ )
            A_ : int = parser.parse_args([] )
            self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , bar=__magic_name__ , baz=__magic_name__ , ces=[] , des=[] ) )
            A_ : Dict = parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split() )
            self.assertEqual(__magic_name__ , Namespace(foo=12 , bar=3.14 , baz='''42''' , ces=['''a''', '''b''', '''c'''] , des=[1, 2, 3] ) )

    def UpperCAmelCase ( self ):
        """Required fields map to required argparse arguments."""
        A_ : List[str] = HfArgumentParser(__magic_name__ )
        A_ : Optional[Any] = argparse.ArgumentParser()
        expected.add_argument('''--required_list''' , nargs='''+''' , type=__magic_name__ , required=__magic_name__ )
        expected.add_argument('''--required_str''' , type=__magic_name__ , required=__magic_name__ )
        expected.add_argument(
            '''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=__magic_name__ , )
        self.argparsersEqual(__magic_name__ , __magic_name__ )

    def UpperCAmelCase ( self ):
        """String (forward-reference) annotations are resolved like real types."""
        A_ : Any = HfArgumentParser(__magic_name__ )
        A_ : Dict = argparse.ArgumentParser()
        expected.add_argument('''--foo''' , type=__magic_name__ , required=__magic_name__ )
        expected.add_argument(
            '''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=__magic_name__ , )
        expected.add_argument('''--opt''' , type=__magic_name__ , default=__magic_name__ )
        expected.add_argument('''--baz''' , default='''toto''' , type=__magic_name__ , help='''help message''' )
        expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=__magic_name__ )
        self.argparsersEqual(__magic_name__ , __magic_name__ )

    def UpperCAmelCase ( self ):
        """parse_dict builds a dataclass from a plain dict."""
        A_ : Optional[Any] = HfArgumentParser(__magic_name__ )
        A_ : List[Any] = {
            '''foo''': 12,
            '''bar''': 3.14,
            '''baz''': '''42''',
            '''flag''': True,
        }
        A_ : str = parser.parse_dict(__magic_name__ )[0]
        A_ : Any = BasicExample(**__magic_name__ )
        self.assertEqual(__magic_name__ , __magic_name__ )

    def UpperCAmelCase ( self ):
        """parse_dict raises when extra keys are present and not allowed."""
        A_ : Optional[Any] = HfArgumentParser(__magic_name__ )
        A_ : Optional[Any] = {
            '''foo''': 12,
            '''bar''': 3.14,
            '''baz''': '''42''',
            '''flag''': True,
            '''extra''': 42,
        }
        self.assertRaises(__magic_name__ , parser.parse_dict , __magic_name__ , allow_extra_keys=__magic_name__ )

    def UpperCAmelCase ( self ):
        """Round-trip through a JSON file.

        NOTE(review): calls ``parse_yaml_file`` on a ``.json`` path —
        upstream uses ``parse_json_file`` here.
        """
        A_ : str = HfArgumentParser(__magic_name__ )
        A_ : Optional[int] = {
            '''foo''': 12,
            '''bar''': 3.14,
            '''baz''': '''42''',
            '''flag''': True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            A_ : Optional[int] = os.path.join(__magic_name__ , '''temp_json''' )
            os.mkdir(__magic_name__ )
            with open(temp_local_path + '''.json''' , '''w+''' ) as f:
                json.dump(__magic_name__ , __magic_name__ )
            A_ : Union[str, Any] = parser.parse_yaml_file(Path(temp_local_path + '''.json''' ) )[0]
        A_ : Any = BasicExample(**__magic_name__ )
        self.assertEqual(__magic_name__ , __magic_name__ )

    def UpperCAmelCase ( self ):
        """Round-trip through a YAML file (upstream: test_parse_yaml)."""
        A_ : List[Any] = HfArgumentParser(__magic_name__ )
        A_ : Optional[int] = {
            '''foo''': 12,
            '''bar''': 3.14,
            '''baz''': '''42''',
            '''flag''': True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            A_ : Dict = os.path.join(__magic_name__ , '''temp_yaml''' )
            os.mkdir(__magic_name__ )
            with open(temp_local_path + '''.yaml''' , '''w+''' ) as f:
                yaml.dump(__magic_name__ , __magic_name__ )
            A_ : Optional[int] = parser.parse_yaml_file(Path(temp_local_path + '''.yaml''' ) )[0]
        A_ : Union[str, Any] = BasicExample(**__magic_name__ )
        self.assertEqual(__magic_name__ , __magic_name__ )

    def UpperCAmelCase ( self ):
        """HfArgumentParser can be built from TrainingArguments."""
        A_ : Optional[int] = HfArgumentParser(__magic_name__ )
        self.assertIsNotNone(__magic_name__ )
| 236 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger.
# NOTE(review): mangled — the next assignment reuses `_lowerCAmelCase`,
# shadowing this logger binding.
_lowerCAmelCase = logging.get_logger(__name__)
# Map from pretrained model id to its hosted config URL
# (upstream name: OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP).
_lowerCAmelCase = {
    's-JoL/Open-Llama-V1': 'https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json',
}
class __UpperCAmelCase( A__ ):
    """Configuration for Open-Llama models (upstream: ``OpenLlamaConfig``).

    NOTE(review): reconstructed from a name-mangled original in which every
    ``__init__`` parameter shared one name (a SyntaxError) and every
    assignment went to a throwaway local instead of ``self``. Parameter
    names/order follow the assignment order and the strings in the body.
    The base-class name ``A__`` is mangled (upstream: ``PretrainedConfig``).
    """

    # Model type identifier (mangled class-attribute name kept as-is;
    # upstream: ``model_type``).
    __magic_name__ = """open-llama"""

    def __init__(
        self,
        vocab_size=10_0000,
        hidden_size=4096,
        intermediate_size=1_1008,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_memory_efficient_attention=True,
        hidden_dropout_prob=0.1,
        attention_dropout_prob=0.1,
        use_stable_embedding=True,
        shared_input_output_embedding=True,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # The misspelled kwarg ("memorry") is the historical upstream spelling;
        # keep accepting it for backward compatibility.
        self.use_memory_efficient_attention = kwargs.pop(
            '''use_memorry_efficient_attention''' , use_memory_efficient_attention )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs , )

    def _rope_scaling_validation(self):
        """Validate ``self.rope_scaling``: None, or a 2-key dict whose
        ``type`` is "linear"/"dynamic" and whose ``factor`` is a float > 1.

        (Renamed from the mangled ``UpperCAmelCase`` — ``__init__`` calls it
        by this name.)
        """
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                '''`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, '''
                f"""got {self.rope_scaling}""" )
        rope_scaling_type = self.rope_scaling.get('''type''' , None )
        rope_scaling_factor = self.rope_scaling.get('''factor''' , None )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
            raise ValueError(f"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""" )
"""simple docstring"""
import os
import sys
import unittest
# Locate the repo root three levels up and make the repo's `utils` importable.
# NOTE(review): mangled — the path is bound to `__A` but the following lines
# read `git_repo_path`, which is unbound here.
__A = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
__A = os.path.join(git_repo_path, """src""", """transformers""")
# Template for a dummy module-level constant (each `__A` below shadows the
# previous binding; upstream these are three distinct template constants).
__A = """
{0} = None
"""
# Template for a dummy class guarded by backend requirements.
__A = """
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
"""
# Template for a dummy function guarded by backend requirements.
__A = """
def {0}(*args, **kwargs):
requires_backends({0}, {1})
"""
class _lowerCAmelCase ( unittest.TestCase ):
    """Tests for the repo's `check_dummies` utility.

    NOTE(review): mangled — all four test methods share the name
    ``snake_case`` (later defs shadow earlier ones, so only the last would
    run), and assertions reference ``__UpperCAmelCase``, which name-mangles
    to an unbound ``_lowerCAmelCase__UpperCAmelCase`` inside this class.
    Code kept byte-for-byte; comments only.
    """

    def snake_case ( self ):
        '''find_backend extracts the backend name from an __init__ guard line.'''
        lowerCAmelCase__ : Tuple = find_backend(' _import_structure["models.albert"].append("AlbertTokenizerFast")' )
        self.assertIsNone(__UpperCAmelCase )
        lowerCAmelCase__ : int = find_backend(' if not is_tokenizers_available():' )
        self.assertEqual(__UpperCAmelCase , 'tokenizers' )
        lowerCAmelCase__ : List[Any] = find_backend(' if not is_tensorflow_text_available():' )
        self.assertEqual(__UpperCAmelCase , 'tensorflow_text' )
        lowerCAmelCase__ : List[Any] = find_backend(' if not (is_sentencepiece_available() and is_tokenizers_available()):' )
        self.assertEqual(__UpperCAmelCase , 'sentencepiece_and_tokenizers' )
        lowerCAmelCase__ : str = find_backend(
            ' if not (is_sentencepiece_available() and is_tensorflow_text_available()):' )
        self.assertEqual(__UpperCAmelCase , 'sentencepiece_and_tensorflow_text' )
        lowerCAmelCase__ : Any = find_backend(
            ' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):' )
        self.assertEqual(__UpperCAmelCase , 'sentencepiece_and_tokenizers_and_vision' )

    def snake_case ( self ):
        '''read_init returns a backend -> objects mapping with expected entries.'''
        lowerCAmelCase__ : Optional[Any] = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn('torch' , __UpperCAmelCase )
        self.assertIn('tensorflow_text' , __UpperCAmelCase )
        self.assertIn('sentencepiece_and_tokenizers' , __UpperCAmelCase )
        # Likewise, we can't assert on the exact content of a key
        self.assertIn('BertModel' , objects['torch'] )
        self.assertIn('TFBertModel' , objects['tf'] )
        self.assertIn('FlaxBertModel' , objects['flax'] )
        self.assertIn('BertModel' , objects['torch'] )
        self.assertIn('TFBertTokenizer' , objects['tensorflow_text'] )
        self.assertIn('convert_slow_tokenizer' , objects['sentencepiece_and_tokenizers'] )

    def snake_case ( self ):
        '''create_dummy_object renders constants, functions and classes.'''
        lowerCAmelCase__ : int = create_dummy_object('CONSTANT' , '\'torch\'' )
        self.assertEqual(__UpperCAmelCase , '\nCONSTANT = None\n' )
        lowerCAmelCase__ : Optional[int] = create_dummy_object('function' , '\'torch\'' )
        self.assertEqual(
            __UpperCAmelCase , '\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n' )
        lowerCAmelCase__ : Union[str, Any] = '\nclass FakeClass(metaclass=DummyObject):\n _backends = \'torch\'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, \'torch\')\n'
        lowerCAmelCase__ : Union[str, Any] = create_dummy_object('FakeClass' , '\'torch\'' )
        self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )

    def snake_case ( self ):
        '''create_dummy_files renders a full dummy module per backend.'''
        lowerCAmelCase__ : Dict = '# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = ["torch"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, ["torch"])\n'
        lowerCAmelCase__ : Optional[int] = create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']} )
        self.assertEqual(dummy_files['torch'] , __UpperCAmelCase )
| 93 |
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
# Module logger (NOTE(review): the `int` annotation is wrong — the value is a
# logger — but it is kept to avoid evaluating an unavailable type here).
lowerCamelCase__ : int = logging.get_logger(__name__)
class __magic_name__ (snake_case_ ):
    """Deprecated alias of ``PerceiverImageProcessor``.

    Kept only for backward compatibility; emits a deprecation warning on
    construction and otherwise behaves exactly like the parent image
    processor. NOTE(review): the base-class name ``snake_case_`` is mangled —
    upstream this subclasses ``PerceiverImageProcessor``.
    """

    def __init__(self, *args, **kwargs):
        # The mangled original declared `*_a, **_a` (duplicate parameter
        # names — a SyntaxError) and passed the varargs where the warning
        # category belongs; FutureWarning is the deprecation convention
        # here — TODO confirm against upstream transformers.
        warnings.warn(
            '''The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use PerceiverImageProcessor instead.''' ,
            FutureWarning ,
        )
        super().__init__(*args , **kwargs )
| 33 | 0 |
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class SCREAMING_SNAKE_CASE ( a__ , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase_ : Tuple = BertJapaneseTokenizer
UpperCamelCase_ : Any = False
UpperCamelCase_ : Tuple = True
    def _A ( self : Optional[int] ):
        """setUp: write a small Japanese WordPiece vocab to the temp dir."""
        super().setUp()
        SCREAMING_SNAKE_CASE : Tuple = [
            '[UNK]',
            '[CLS]',
            '[SEP]',
            'こんにちは',
            'こん',
            'にちは',
            'ばんは',
            '##こん',
            '##にちは',
            '##ばんは',
            '世界',
            '##世界',
            '、',
            '##、',
            '。',
            '##。',
        ]
        # NOTE(review): mangled — the list is bound to a throwaway name, but
        # the lines below read `vocab_tokens`/`self.vocab_file` (upstream
        # bindings), which are unbound here.
        SCREAMING_SNAKE_CASE : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
    def _A ( self : str , UpperCAmelCase_ : List[str] ):
        """Return a (raw_text, expected_tokenized_text) pair for round-trips.

        NOTE(review): mangled — both strings are bound to a throwaway name
        while the return reads unbound `input_text`/`output_text`.
        """
        SCREAMING_SNAKE_CASE : Tuple = 'こんにちは、世界。 \nこんばんは、世界。'
        SCREAMING_SNAKE_CASE : List[str] = 'こんにちは 、 世界 。 こんばんは 、 世界 。'
        return input_text, output_text
    def _A ( self : Any , UpperCAmelCase_ : Any ):
        """Encode/decode helper returning (text, ids) for a tokenizer.

        NOTE(review): mangled — `tokenizer`, `text`, `ids` and the `_A`
        arguments are unbound at runtime; upstream this is
        `get_clean_sequence(self, tokenizer)`.
        """
        SCREAMING_SNAKE_CASE : List[Any] = self.get_input_output_texts(_A )
        SCREAMING_SNAKE_CASE : List[str] = tokenizer.encode(_A , add_special_tokens=_A )
        SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.decode(_A , clean_up_tokenization_spaces=_A )
        return text, ids
    def _A ( self : Any ):
        """Intentionally skipped upstream (placeholder)."""
        pass # TODO add if relevant
    def _A ( self : Dict ):
        """Intentionally skipped upstream (placeholder)."""
        pass # TODO add if relevant
    def _A ( self : List[str] ):
        """Intentionally skipped upstream (placeholder)."""
        pass # TODO add if relevant
def _A ( self : Tuple ):
SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer_class(self.vocab_file )
SCREAMING_SNAKE_CASE : Tuple = tokenizer.tokenize("こんにちは、世界。\nこんばんは、世界。" )
self.assertListEqual(_A , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_A ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
def _A ( self : Dict ):
SCREAMING_SNAKE_CASE : str = self.tokenizer_class(self.vocab_file , word_tokenizer_type="mecab" )
self.assertIsNotNone(_A )
SCREAMING_SNAKE_CASE : Union[str, Any] = 'こんにちは、世界。\nこんばんは、世界。'
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.tokenize(_A )
self.assertListEqual(_A , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_A ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
SCREAMING_SNAKE_CASE : List[str] = os.path.join(self.tmpdirname , "tokenizer.bin" )
with open(_A , "wb" ) as handle:
pickle.dump(_A , _A )
with open(_A , "rb" ) as handle:
SCREAMING_SNAKE_CASE : Optional[int] = pickle.load(_A )
SCREAMING_SNAKE_CASE : Optional[int] = tokenizer_new.tokenize(_A )
self.assertListEqual(_A , _A )
def _A ( self : List[Any] ):
SCREAMING_SNAKE_CASE : Tuple = MecabTokenizer(mecab_dic="ipadic" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def _A ( self : Optional[Any] ):
try:
SCREAMING_SNAKE_CASE : Any = MecabTokenizer(mecab_dic="unidic_lite" )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def _A ( self : Optional[int] ):
try:
SCREAMING_SNAKE_CASE : int = MecabTokenizer(mecab_dic="unidic" )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def _A ( self : Any ):
SCREAMING_SNAKE_CASE : Union[str, Any] = MecabTokenizer(do_lower_case=_A , mecab_dic="ipadic" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iphone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def _A ( self : Dict ):
try:
SCREAMING_SNAKE_CASE : List[str] = MecabTokenizer(
do_lower_case=_A , normalize_text=_A , mecab_option="-d /usr/local/lib/mecab/dic/jumandic" )
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "\u3000", "。"] , )
def _A ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE : str = MecabTokenizer(normalize_text=_A , mecab_dic="ipadic" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", " ", "。"] , )
@require_sudachi
def _A ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer_class(self.vocab_file , word_tokenizer_type="sudachi" )
self.assertIsNotNone(_A )
SCREAMING_SNAKE_CASE : Union[str, Any] = 'こんにちは、世界。\nこんばんは、世界。'
SCREAMING_SNAKE_CASE : str = tokenizer.tokenize(_A )
self.assertListEqual(_A , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_A ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(self.tmpdirname , "tokenizer.bin" )
with open(_A , "wb" ) as handle:
pickle.dump(_A , _A )
with open(_A , "rb" ) as handle:
SCREAMING_SNAKE_CASE : Any = pickle.load(_A )
SCREAMING_SNAKE_CASE : Tuple = tokenizer_new.tokenize(_A )
self.assertListEqual(_A , _A )
@require_sudachi
def _A ( self : Tuple ):
SCREAMING_SNAKE_CASE : Tuple = SudachiTokenizer(sudachi_dict_type="core" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "] , )
@require_sudachi
def _A ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE : Dict = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="A" )
self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国", "人", "参政", "権"] )
@require_sudachi
def _A ( self : List[Any] ):
SCREAMING_SNAKE_CASE : List[Any] = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="B" )
self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国人", "参政権"] )
@require_sudachi
def _A ( self : int ):
SCREAMING_SNAKE_CASE : int = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="C" )
self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国人参政権"] )
@require_sudachi
def _A ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE : Tuple = SudachiTokenizer(do_lower_case=_A , sudachi_dict_type="core" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , [" ", "\t", "アップル", "ストア", "で", "iphone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "] , )
@require_sudachi
def _A ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE : Any = SudachiTokenizer(normalize_text=_A , sudachi_dict_type="core" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", "\u3000", "。", " ", " "] , )
@require_sudachi
def _A ( self : Any ):
SCREAMING_SNAKE_CASE : Union[str, Any] = SudachiTokenizer(trim_whitespace=_A , sudachi_dict_type="core" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
@require_jumanpp
def _A ( self : Optional[int] ):
SCREAMING_SNAKE_CASE : Union[str, Any] = self.tokenizer_class(self.vocab_file , word_tokenizer_type="jumanpp" )
self.assertIsNotNone(_A )
SCREAMING_SNAKE_CASE : Dict = 'こんにちは、世界。\nこんばんは、世界。'
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.tokenize(_A )
self.assertListEqual(_A , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_A ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
SCREAMING_SNAKE_CASE : List[str] = os.path.join(self.tmpdirname , "tokenizer.bin" )
with open(_A , "wb" ) as handle:
pickle.dump(_A , _A )
with open(_A , "rb" ) as handle:
SCREAMING_SNAKE_CASE : str = pickle.load(_A )
SCREAMING_SNAKE_CASE : List[str] = tokenizer_new.tokenize(_A )
self.assertListEqual(_A , _A )
@require_jumanpp
def _A ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE : Any = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )
@require_jumanpp
def _A ( self : int ):
SCREAMING_SNAKE_CASE : Dict = JumanppTokenizer(do_lower_case=_A )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iphone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )
@require_jumanpp
def _A ( self : Any ):
SCREAMING_SNAKE_CASE : Any = JumanppTokenizer(normalize_text=_A )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["ア", "ッ", "フ", "゚", "ル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )
@require_jumanpp
def _A ( self : int ):
SCREAMING_SNAKE_CASE : Union[str, Any] = JumanppTokenizer(trim_whitespace=_A )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "。"] , )
@require_jumanpp
def _A ( self : Any ):
SCREAMING_SNAKE_CASE : List[str] = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize("ありがとうございますm(_ _)m見つけるのが大変です。" ) , ["ありがとう", "ございます", "m(_ _)m", "見つける", "の", "が", "大変です", "。"] , )
def _A ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE : List[str] = ['[UNK]', '[CLS]', '[SEP]', 'こんにちは', 'こん', 'にちは', 'ばんは', '##こん', '##にちは', '##ばんは']
SCREAMING_SNAKE_CASE : Any = {}
for i, token in enumerate(_A ):
SCREAMING_SNAKE_CASE : int = i
SCREAMING_SNAKE_CASE : Tuple = WordpieceTokenizer(vocab=_A , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("こんにちは" ) , ["こんにちは"] )
self.assertListEqual(tokenizer.tokenize("こんばんは" ) , ["こん", "##ばんは"] )
self.assertListEqual(tokenizer.tokenize("こんばんは こんばんにちは こんにちは" ) , ["こん", "##ばんは", "[UNK]", "こんにちは"] )
def _A ( self : Dict ):
SCREAMING_SNAKE_CASE : Any = BertJapaneseTokenizer.from_pretrained("nlp-waseda/roberta-base-japanese-with-auto-jumanpp" )
SCREAMING_SNAKE_CASE : int = tokenizer.subword_tokenizer
SCREAMING_SNAKE_CASE : Optional[Any] = subword_tokenizer.tokenize("国境 の 長い トンネル を 抜ける と 雪国 であった 。" )
self.assertListEqual(_A , ["▁国境", "▁の", "▁長い", "▁トンネル", "▁を", "▁抜ける", "▁と", "▁雪", "国", "▁であった", "▁。"] )
SCREAMING_SNAKE_CASE : Union[str, Any] = subword_tokenizer.tokenize("こんばんは こんばん にち は こんにちは" )
self.assertListEqual(_A , ["▁こん", "ばん", "は", "▁こん", "ばん", "▁に", "ち", "▁は", "▁こんにちは"] )
def _A ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese" )
SCREAMING_SNAKE_CASE : List[str] = tokenizer.encode("ありがとう。" , add_special_tokens=_A )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.encode("どういたしまして。" , add_special_tokens=_A )
SCREAMING_SNAKE_CASE : Tuple = tokenizer.build_inputs_with_special_tokens(_A )
SCREAMING_SNAKE_CASE : Tuple = tokenizer.build_inputs_with_special_tokens(_A , _A )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class SCREAMING_SNAKE_CASE ( a__ , unittest.TestCase ):
    """Tests for BertJapaneseTokenizer with the character-level subword tokenizer.

    NOTE(review): same identifier mangling as the class above — assignments go to
    ``SCREAMING_SNAKE_CASE`` while later lines read never-bound names such as ``_A``
    and ``tokenizer``, and all test methods share the name ``_A``.
    """

    UpperCamelCase_ : Optional[int] = BertJapaneseTokenizer  # tokenizer class under test
    UpperCamelCase_ : int = False

    def _A ( self : str ):
        # Character-level vocab written to tmpdir.
        super().setUp()
        SCREAMING_SNAKE_CASE : Union[str, Any] = ['[UNK]', '[CLS]', '[SEP]', 'こ', 'ん', 'に', 'ち', 'は', 'ば', '世', '界', '、', '。']
        SCREAMING_SNAKE_CASE : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )

    def _A ( self : Dict , **UpperCAmelCase_ : Dict ):
        # Tokenizer factory configured for character-level subword splitting.
        return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type="character" , **_A )

    def _A ( self : Optional[Any] , UpperCAmelCase_ : Dict ):
        # Returns a (raw input, expected detokenized output) pair.
        SCREAMING_SNAKE_CASE : Optional[int] = 'こんにちは、世界。 \nこんばんは、世界。'
        SCREAMING_SNAKE_CASE : int = 'こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。'
        return input_text, output_text

    def _A ( self : List[Any] ):
        pass  # TODO add if relevant

    def _A ( self : List[Any] ):
        pass  # TODO add if relevant

    def _A ( self : List[Any] ):
        pass  # TODO add if relevant

    def _A ( self : List[str] ):
        # Full tokenizer splits input into single characters.
        SCREAMING_SNAKE_CASE : Tuple = self.tokenizer_class(self.vocab_file , subword_tokenizer_type="character" )
        SCREAMING_SNAKE_CASE : Any = tokenizer.tokenize("こんにちは、世界。 \nこんばんは、世界。" )
        self.assertListEqual(
            _A , ["こ", "ん", "に", "ち", "は", "、", "世", "界", "。", "こ", "ん", "ば", "ん", "は", "、", "世", "界", "。"] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(_A ) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] )

    def _A ( self : Tuple ):
        # CharacterTokenizer in isolation: out-of-vocab characters map to [UNK].
        SCREAMING_SNAKE_CASE : List[Any] = ['[UNK]', '[CLS]', '[SEP]', 'こ', 'ん', 'に', 'ち', 'は', 'ば', '世', '界', '、', '。']
        SCREAMING_SNAKE_CASE : Tuple = {}
        for i, token in enumerate(_A ):
            SCREAMING_SNAKE_CASE : int = i
        SCREAMING_SNAKE_CASE : Dict = CharacterTokenizer(vocab=_A , unk_token="[UNK]" )
        self.assertListEqual(tokenizer.tokenize("" ) , [] )
        self.assertListEqual(tokenizer.tokenize("こんにちは" ) , ["こ", "ん", "に", "ち", "は"] )
        self.assertListEqual(tokenizer.tokenize("こんにちほ" ) , ["こ", "ん", "に", "ち", "[UNK]"] )

    def _A ( self : Optional[int] ):
        # build_inputs_with_special_tokens wraps with [CLS] (id 2) / [SEP] (id 3).
        SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese-char" )
        SCREAMING_SNAKE_CASE : List[str] = tokenizer.encode("ありがとう。" , add_special_tokens=_A )
        SCREAMING_SNAKE_CASE : Dict = tokenizer.encode("どういたしまして。" , add_special_tokens=_A )
        SCREAMING_SNAKE_CASE : int = tokenizer.build_inputs_with_special_tokens(_A )
        SCREAMING_SNAKE_CASE : Any = tokenizer.build_inputs_with_special_tokens(_A , _A )
        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Checks that AutoTokenizer resolves a BertJapanese checkpoint to BertJapaneseTokenizer."""

    def _A ( self : Tuple ):
        # Loading by hub id must dispatch to the Japanese-specific tokenizer class,
        # not the generic BertTokenizer. (Fixes the original's undefined `_A` references.)
        tokenizer_name = 'cl-tohoku/bert-base-japanese'
        tokenizer = AutoTokenizer.from_pretrained(tokenizer_name )
        self.assertIsInstance(tokenizer , BertJapaneseTokenizer )
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Checks the warning emitted when a checkpoint is loaded with a mismatched tokenizer class."""

    def _A ( self : int ):
        # A Japanese checkpoint loaded with the plain BertTokenizer should warn...
        # (Fixes the original's undefined `_A` references by binding the checkpoint id.)
        pretrained_name = 'cl-tohoku/bert-base-japanese'
        with self.assertLogs("transformers" , level="WARNING" ) as cm:
            BertTokenizer.from_pretrained(pretrained_name )
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from." ) )
        # ... and vice versa: an English checkpoint loaded with BertJapaneseTokenizer.
        pretrained_name = 'bert-base-cased'
        with self.assertLogs("transformers" , level="WARNING" ) as cm:
            BertJapaneseTokenizer.from_pretrained(pretrained_name )
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from." ) )
| 716 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
description = """Create a default config file for Accelerate with only a few flags set."""


def write_basic_config(mixed_precision="no", save_location=default_json_config_file, use_xpu=False):
    """Create and write a minimal Accelerate cluster config.

    The original block was mangled: all three parameters were named ``lowercase``
    (a SyntaxError) and every local was assigned to ``SCREAMING_SNAKE_CASE`` while
    read under its real name (``path``, ``config``, ``num_gpus`` …). Names are
    restored to the ones the body itself references (this function's own name is
    grounded by the call in the command entry point below).

    Args:
        mixed_precision: One of "no", "fp16", "bf16" or "fp8" (case-insensitive).
        save_location: Where to write the JSON config; an existing file is never overwritten.
        use_xpu: Whether to prefer Intel XPUs when no CUDA devices are present.

    Returns:
        The path the config was written to, or ``False`` if a config already existed.

    Raises:
        ValueError: If ``mixed_precision`` is not a recognised value.
    """
    path = Path(save_location)
    path.parent.mkdir(parents=True, exist_ok=True)
    if path.exists():
        print(
            f"Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`."
        )
        return False
    mixed_precision = mixed_precision.lower()
    if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
        raise ValueError(
            f"`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}"
        )
    config = {
        "compute_environment": "LOCAL_MACHINE",
        "mixed_precision": mixed_precision,
    }
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        config["num_processes"] = num_gpus
        config["use_cpu"] = False
        if num_gpus > 1:
            config["distributed_type"] = "MULTI_GPU"
        else:
            config["distributed_type"] = "NO"
    elif is_xpu_available() and use_xpu:
        num_xpus = torch.xpu.device_count()
        config["num_processes"] = num_xpus
        config["use_cpu"] = False
        if num_xpus > 1:
            config["distributed_type"] = "MULTI_XPU"
        else:
            config["distributed_type"] = "NO"
    elif is_npu_available():
        num_npus = torch.npu.device_count()
        config["num_processes"] = num_npus
        config["use_cpu"] = False
        if num_npus > 1:
            config["distributed_type"] = "MULTI_NPU"
        else:
            config["distributed_type"] = "NO"
    else:
        # No accelerator found: single CPU process.
        config["use_cpu"] = True
        config["num_processes"] = 1
        config["distributed_type"] = "NO"
    config = ClusterConfig(**config)
    config.to_json_file(path)
    return path


def default_command_parser(parser, parents):
    """Register the `accelerate config default` subcommand and its arguments."""
    parser = parser.add_parser("default", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
    parser.add_argument(
        "--config_file",
        default=default_json_config_file,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
        dest="save_location",
    )
    parser.add_argument(
        "--mixed_precision",
        choices=["no", "fp16", "bf16"],
        type=str,
        help="Whether or not to use mixed precision training. "
        "Choose between FP16 and BF16 (bfloat16) training. "
        "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.",
        default="no",
    )
    parser.set_defaults(func=default_config_command)
    return parser


def default_config_command(args):
    """Entry point for `accelerate config default`: write the config and report where it went."""
    config_file = write_basic_config(args.mixed_precision, args.save_location)
    if config_file:
        print(f"accelerate configuration saved at {config_file}")
| 488 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
# Module-level logger for this image processor.
# NOTE(review): the name looks machine-mangled (upstream convention is `logger`); nothing
# in the visible class reads it — confirm against the original file before renaming.
lowerCamelCase__ : Optional[int] = logging.get_logger(__name__)
class __magic_name__ (BaseImageProcessor ):
    """Image processor: optional shortest-edge resize, center crop, rescale and normalization.

    NOTE(review): the original block was machine-mangled — the parent class name was the
    undefined ``__UpperCamelCase``, every parameter was named ``_a`` (a SyntaxError), and
    all five methods shared one name while ``preprocess`` calls ``self.resize``,
    ``self.center_crop``, ``self.rescale`` and ``self.normalize``. Names are restored to
    match those internal calls and the helpers imported at the top of the file.

    Args:
        do_resize: Whether to resize the shortest edge to ``size["shortest_edge"]``.
        size: Target size dict; defaults to ``{"shortest_edge": 256}``.
        resample: PIL resampling filter used when resizing.
        do_center_crop: Whether to center-crop to ``crop_size``.
        crop_size: Crop size dict; defaults to ``{"height": 224, "width": 224}``.
        do_rescale: Whether to multiply pixel values by ``rescale_factor``.
        rescale_factor: Scale factor applied when rescaling, typically ``1/255``.
        do_normalize: Whether to normalize with ``image_mean`` / ``image_std``.
        image_mean: Per-channel mean; defaults to ``IMAGENET_STANDARD_MEAN``.
        image_std: Per-channel std; defaults to ``IMAGENET_STANDARD_STD``.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        # Shortest-edge resize keeps aspect ratio, hence default_to_square=False.
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize so the shortest edge equals ``size["shortest_edge"]`` (aspect ratio kept)."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        # `resize` below is the module-level transform imported at the top of the file.
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop to ``(size["height"], size["width"])``."""
        size = get_size_dict(size)
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by ``scale``."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize per channel: ``(image - mean) / std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: Optional[PILImageResampling] = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Run the configured resize/crop/rescale/normalize pipeline over one or more images.

        Per-call arguments override the instance defaults. Returns a ``BatchFeature``
        holding the processed images under the "pixel_values" key.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 33 |
"""Lazy-import structure for the Nezha model.

Fixes in this block: the import-structure dict and the modeling-name list were bound to
the throwaway name ``__magic_name__`` while ``_import_structure`` was read at the bottom
(a NameError), and the ``_LazyModule`` instance was discarded instead of being installed
into ``sys.modules`` (the standard Transformers lazy-module pattern).
"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Mapping of submodule name -> public names it exports; consumed by _LazyModule below.
_import_structure = {
    "configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"],
}

# Modeling code is only advertised when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_nezha"] = [
        "NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NezhaForNextSentencePrediction",
        "NezhaForMaskedLM",
        "NezhaForPreTraining",
        "NezhaForMultipleChoice",
        "NezhaForQuestionAnswering",
        "NezhaForSequenceClassification",
        "NezhaForTokenClassification",
        "NezhaModel",
        "NezhaPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nezha import (
            NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
            NezhaModel,
            NezhaPreTrainedModel,
        )
else:
    import sys

    # At runtime the module object is replaced by a lazy proxy that imports on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 672 | 0 |
"""Lazy-import structure for InstructBLIP.

Fixes in this block: the import-structure dict and the modeling-name list were bound to
the throwaway name ``__UpperCAmelCase`` while ``_import_structure`` was read at the
bottom (a NameError), and the ``_LazyModule`` instance was discarded instead of being
installed into ``sys.modules`` (the standard Transformers lazy-module pattern).
"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Mapping of submodule name -> public names it exports; consumed by _LazyModule below.
_import_structure = {
    "configuration_instructblip": [
        "INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InstructBlipConfig",
        "InstructBlipQFormerConfig",
        "InstructBlipVisionConfig",
    ],
    "processing_instructblip": ["InstructBlipProcessor"],
}

# Modeling code is only advertised when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_instructblip"] = [
        "INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InstructBlipQFormerModel",
        "InstructBlipPreTrainedModel",
        "InstructBlipForConditionalGeneration",
        "InstructBlipVisionModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_instructblip import (
        INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        InstructBlipConfig,
        InstructBlipQFormerConfig,
        InstructBlipVisionConfig,
    )
    from .processing_instructblip import InstructBlipProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_instructblip import (
            INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            InstructBlipForConditionalGeneration,
            InstructBlipPreTrainedModel,
            InstructBlipQFormerModel,
            InstructBlipVisionModel,
        )
else:
    import sys

    # At runtime the module object is replaced by a lazy proxy that imports on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 702 |
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
# Pick the tensor framework used for `return_tensors` in these tests. Downstream code in
# this file reads the module-level FRAMEWORK constant (e.g. when inspecting batch tensors),
# but the original assigned the throwaway name `__UpperCAmelCase`, leaving FRAMEWORK
# undefined — restored here.
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class SCREAMING_SNAKE_CASE ( snake_case , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = ByTaTokenizer
__UpperCamelCase = False
def _UpperCamelCase ( self ):
'''simple docstring'''
super().setUp()
snake_case: int = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _UpperCamelCase ( self ):
'''simple docstring'''
return ByTaTokenizer.from_pretrained('google/byt5-small' )
def _UpperCamelCase ( self , **SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=20 , SCREAMING_SNAKE_CASE__=5 ):
'''simple docstring'''
snake_case: Optional[Any] = []
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
try:
snake_case: Optional[Any] = tokenizer.decode([i] , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE__ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
snake_case: List[str] = list(filter(lambda SCREAMING_SNAKE_CASE__ : re.match(r'^[ a-zA-Z]+$' , t[1] ) , SCREAMING_SNAKE_CASE__ ) )
snake_case: str = list(filter(lambda SCREAMING_SNAKE_CASE__ : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ ) )
if max_length is not None and len(SCREAMING_SNAKE_CASE__ ) > max_length:
snake_case: Union[str, Any] = toks[:max_length]
if min_length is not None and len(SCREAMING_SNAKE_CASE__ ) < min_length and len(SCREAMING_SNAKE_CASE__ ) > 0:
while len(SCREAMING_SNAKE_CASE__ ) < min_length:
snake_case: Tuple = toks + toks
# toks_str = [t[1] for t in toks]
snake_case: Dict = [t[0] for t in toks]
# Ensure consistency
snake_case: int = tokenizer.decode(SCREAMING_SNAKE_CASE__ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE__ )
if " " not in output_txt and len(SCREAMING_SNAKE_CASE__ ) > 1:
snake_case: str = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE__ )
+ ' '
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE__ )
)
if with_prefix_space:
snake_case: Tuple = ' ' + output_txt
snake_case: Optional[int] = tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
return output_txt, output_ids
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Any = self.ta_base_tokenizer
snake_case: str = tokenizer(['hi</s>', 'I went to the gym</s>', '</s>'] )
snake_case: List[Any] = tokenizer(['hi', 'I went to the gym', ''] )
self.assertListEqual(batch_with_eos_added['input_ids'] , batch_without_eos_added['input_ids'] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Any = self.ta_base_tokenizer
snake_case: Union[str, Any] = 'Unicode €.'
snake_case: str = tokenizer(SCREAMING_SNAKE_CASE__ )
snake_case: Tuple = [88, 1_13, 1_08, 1_02, 1_14, 1_03, 1_04, 35, 2_29, 1_33, 1_75, 49, 1]
self.assertEqual(encoded['input_ids'] , SCREAMING_SNAKE_CASE__ )
# decoding
snake_case: List[str] = tokenizer.decode(SCREAMING_SNAKE_CASE__ )
self.assertEqual(SCREAMING_SNAKE_CASE__ , 'Unicode €.</s>' )
snake_case: List[Any] = tokenizer('e è é ê ë' )
snake_case: Optional[Any] = [1_04, 35, 1_98, 1_71, 35, 1_98, 1_72, 35, 1_98, 1_73, 35, 1_98, 1_74, 1]
self.assertEqual(encoded['input_ids'] , SCREAMING_SNAKE_CASE__ )
# decoding
snake_case: List[Any] = tokenizer.decode(SCREAMING_SNAKE_CASE__ )
self.assertEqual(SCREAMING_SNAKE_CASE__ , 'e è é ê ë</s>' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , 'e è é ê ë</s>' )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: str = self.ta_base_tokenizer
snake_case: Optional[Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
snake_case: Optional[int] = [68, 35, 1_11, 1_14, 1_13, 1_06, 35, 1_15, 1_00, 1_17, 1_00, 1_06, 1_17, 1_00, 1_15, 1_07, 35, 1_05, 1_14, 1_17, 35, 1_18, 1_20, 1_12, 1_12, 1_00, 1_17, 1_08, 1_25, 1_00, 1_19, 1_08, 1_14, 1_13, 49, 1, 0]
# fmt: on
snake_case: str = tokenizer(SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if FRAMEWORK != "jax":
snake_case: Optional[Any] = list(batch.input_ids.numpy()[0] )
else:
snake_case: Dict = list(batch.input_ids.tolist()[0] )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertEqual((2, 37) , batch.input_ids.shape )
self.assertEqual((2, 37) , batch.attention_mask.shape )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Dict = self.ta_base_tokenizer
snake_case: List[str] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
snake_case: Union[str, Any] = tokenizer(SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , SCREAMING_SNAKE_CASE__ )
self.assertIn('attention_mask' , SCREAMING_SNAKE_CASE__ )
self.assertNotIn('decoder_input_ids' , SCREAMING_SNAKE_CASE__ )
self.assertNotIn('decoder_attention_mask' , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[str] = self.ta_base_tokenizer
snake_case: str = [
'Summary of the text.',
'Another summary.',
]
snake_case: Dict = tokenizer(
text_target=SCREAMING_SNAKE_CASE__ , max_length=32 , padding='max_length' , truncation=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ )
self.assertEqual(32 , targets['input_ids'].shape[1] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Union[str, Any] = self.ta_base_tokenizer
snake_case: Optional[int] = ['A long paragraph for summarization. </s>']
snake_case: str = ['Summary of the text. </s>']
# fmt: off
snake_case: str = [68, 35, 1_11, 1_14, 1_13, 1_06, 35, 1_15, 1_00, 1_17, 1_00, 1_06, 1_17, 1_00, 1_15, 1_07, 35, 1_05, 1_14, 1_17, 35, 1_18, 1_20, 1_12, 1_12, 1_00, 1_17, 1_08, 1_25, 1_00, 1_19, 1_08, 1_14, 1_13, 49, 35, 1]
snake_case: Optional[int] = [86, 1_20, 1_12, 1_12, 1_00, 1_17, 1_24, 35, 1_14, 1_05, 35, 1_19, 1_07, 1_04, 35, 1_19, 1_04, 1_23, 1_19, 49, 35, 1]
# fmt: on
snake_case: List[Any] = tokenizer(SCREAMING_SNAKE_CASE__ , text_target=SCREAMING_SNAKE_CASE__ )
self.assertEqual(SCREAMING_SNAKE_CASE__ , batch['input_ids'][0] )
self.assertEqual(SCREAMING_SNAKE_CASE__ , batch['labels'][0] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Any = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
snake_case: Optional[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
snake_case: Union[str, Any] = tempfile.mkdtemp()
snake_case: Dict = ' He is very happy, UNwant\u00E9d,running'
snake_case: Optional[int] = tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = tokenizer.__class__.from_pretrained(SCREAMING_SNAKE_CASE__ )
snake_case: Any = after_tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
shutil.rmtree(SCREAMING_SNAKE_CASE__ )
snake_case: Any = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
snake_case: List[str] = tempfile.mkdtemp()
snake_case: str = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
snake_case: List[str] = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
snake_case: int = tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE__ )
snake_case: Tuple = tokenizer.__class__.from_pretrained(SCREAMING_SNAKE_CASE__ )
snake_case: Tuple = after_tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
snake_case: Union[str, Any] = tokenizer.__class__.from_pretrained(SCREAMING_SNAKE_CASE__ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Union[str, Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(SCREAMING_SNAKE_CASE__ )
with open(os.path.join(SCREAMING_SNAKE_CASE__ , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
snake_case: Any = json.load(SCREAMING_SNAKE_CASE__ )
with open(os.path.join(SCREAMING_SNAKE_CASE__ , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
snake_case: str = json.load(SCREAMING_SNAKE_CASE__ )
snake_case: int = [F"""<extra_id_{i}>""" for i in range(1_25 )]
snake_case: Optional[int] = added_tokens_extra_ids + [
'an_additional_special_token'
]
snake_case: str = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(SCREAMING_SNAKE_CASE__ , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
with open(os.path.join(SCREAMING_SNAKE_CASE__ , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
snake_case: Dict = tokenizer_class.from_pretrained(
SCREAMING_SNAKE_CASE__ , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
snake_case: Union[str, Any] = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=SCREAMING_SNAKE_CASE__ )]
snake_case: Union[str, Any] = tokenizer_class.from_pretrained(
SCREAMING_SNAKE_CASE__ , additional_special_tokens=SCREAMING_SNAKE_CASE__ , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[str] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(SCREAMING_SNAKE_CASE__ )
snake_case: Optional[Any] = tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertTrue(tokenizer.decode([2_55] ) == '' )
    def _UpperCamelCase ( self ):
        '''simple docstring'''
        # NOTE(review): deliberately left empty — presumably overrides an inherited
        # common-tokenizer test that does not apply here; confirm against the base class.
        pass
    def _UpperCamelCase ( self ):
        '''simple docstring'''
        # Intentional no-op override (see note on the first empty test in this group).
        pass
    def _UpperCamelCase ( self ):
        '''simple docstring'''
        # Intentional no-op override (see note on the first empty test in this group).
        pass
    def _UpperCamelCase ( self ):
        '''simple docstring'''
        # Intentional no-op override (see note on the first empty test in this group).
        pass
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Dict = self.get_tokenizers(fast=SCREAMING_SNAKE_CASE__ , do_lower_case=SCREAMING_SNAKE_CASE__ )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
snake_case: Union[str, Any] = ['t', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 'x', 't', '</s>']
snake_case: List[str] = tokenizer.convert_tokens_to_string(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: int = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
snake_case: Optional[Any] = [
'bos_token',
'eos_token',
'unk_token',
'sep_token',
'pad_token',
'cls_token',
'mask_token',
]
snake_case: Dict = 0
snake_case: List[Any] = tokenizer.convert_ids_to_tokens(
SCREAMING_SNAKE_CASE__ , skip_special_tokens=SCREAMING_SNAKE_CASE__ )
for attr in attributes_list:
setattr(SCREAMING_SNAKE_CASE__ , attr + '_id' , SCREAMING_SNAKE_CASE__ )
self.assertEqual(getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
self.assertEqual(getattr(SCREAMING_SNAKE_CASE__ , attr + '_id' ) , SCREAMING_SNAKE_CASE__ )
setattr(SCREAMING_SNAKE_CASE__ , attr + '_id' , SCREAMING_SNAKE_CASE__ )
self.assertEqual(getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
self.assertEqual(getattr(SCREAMING_SNAKE_CASE__ , attr + '_id' ) , SCREAMING_SNAKE_CASE__ )
setattr(SCREAMING_SNAKE_CASE__ , 'additional_special_tokens_ids' , [] )
self.assertListEqual(getattr(SCREAMING_SNAKE_CASE__ , 'additional_special_tokens' ) , [] )
self.assertListEqual(getattr(SCREAMING_SNAKE_CASE__ , 'additional_special_tokens_ids' ) , [] )
setattr(SCREAMING_SNAKE_CASE__ , 'additional_special_tokens_ids' , [token_id_to_test_setters] )
self.assertListEqual(getattr(SCREAMING_SNAKE_CASE__ , 'additional_special_tokens' ) , [token_to_test_setters] )
self.assertListEqual(getattr(SCREAMING_SNAKE_CASE__ , 'additional_special_tokens_ids' ) , [token_id_to_test_setters] ) | 692 | 0 |
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def get_xclip_config(model_name, num_frames):
    """Build an XCLIPConfig for the given checkpoint name and clip length.

    The patch size is parsed out of the model name ("...patchNN..."); "large"
    checkpoints get the bigger text/vision hyper-parameters.
    """
    text_config = XCLIPTextConfig()
    # derive patch size from model name
    start_idx = model_name.find("patch")
    patch_size = int(model_name[start_idx + len("patch") : start_idx + len("patch") + 2])
    vision_config = XCLIPVisionConfig(patch_size=patch_size, num_frames=num_frames)
    if "large" in model_name:
        text_config.hidden_size = 768
        text_config.intermediate_size = 3072
        text_config.num_attention_heads = 12
        vision_config.hidden_size = 1024
        vision_config.intermediate_size = 4096
        vision_config.num_attention_heads = 16
        vision_config.num_hidden_layers = 24
        vision_config.mit_hidden_size = 768
        vision_config.mit_intermediate_size = 3072
    if model_name == "xclip-large-patch14-16-frames":
        vision_config.image_size = 336
    config = XCLIPConfig.from_text_vision_configs(text_config, vision_config)
    if "large" in model_name:
        config.projection_dim = 768
    return config
def rename_key(name):
    """Map an original X-CLIP state-dict key to its HF Transformers name.

    The mangled version assigned every replacement to a dead local and returned
    the input unchanged; each replacement must rebind `name`.
    """
    # text encoder
    if name == "token_embedding.weight":
        name = name.replace("token_embedding.weight", "text_model.embeddings.token_embedding.weight")
    if name == "positional_embedding":
        name = name.replace("positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if name.startswith("transformer.resblocks"):
        name = name.replace("transformer.resblocks", "text_model.encoder.layers")
    if "attn.out_proj" in name and "message" not in name:
        name = name.replace("attn.out_proj", "self_attn.out_proj")
    if "ln_final" in name:
        name = name.replace("ln_final", "text_model.final_layer_norm")
    # visual encoder
    if name == "visual.class_embedding":
        name = name.replace("visual.class_embedding", "vision_model.embeddings.class_embedding")
    if name == "visual.positional_embedding":
        name = name.replace("visual.positional_embedding", "vision_model.embeddings.position_embedding.weight")
    if name.startswith("visual.transformer.resblocks"):
        name = name.replace("visual.transformer.resblocks", "vision_model.encoder.layers")
    if "visual.conv1" in name:
        name = name.replace("visual.conv1", "vision_model.embeddings.patch_embedding")
    if "visual.ln_pre" in name:
        name = name.replace("visual.ln_pre", "vision_model.pre_layernorm")
    if "visual.ln_post" in name:
        name = name.replace("visual.ln_post", "vision_model.post_layernorm")
    if "visual.proj" in name:
        name = name.replace("visual.proj", "visual_projection.weight")
    if "text_projection" in name:
        name = name.replace("text_projection", "text_projection.weight")
    # things on top
    if "prompts_visual_proj" in name:
        name = name.replace("prompts_visual_proj", "prompts_visual_projection")
    if "prompts_visual_ln" in name:
        name = name.replace("prompts_visual_ln", "prompts_visual_layernorm")
    # mit
    if name == "mit.positional_embedding":
        name = name.replace("positional", "position")
    if name.startswith("mit.resblocks"):
        name = name.replace("mit.resblocks", "mit.encoder.layers")
    # prompts generator
    if name.startswith("prompts_generator.norm"):
        name = name.replace("prompts_generator.norm", "prompts_generator.layernorm")
    return name
def convert_state_dict(orig_state_dict, config):
    """Rewrite an original X-CLIP state dict in place to HF naming.

    Fused attention projections ("attn.in_proj") are split into separate
    q/k/v projection weights and biases; everything else goes through
    rename_key. Projection matrices stored transposed are transposed back.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "attn.in_proj" in key:
            key_split = key.split(".")
            if key.startswith("visual"):
                layer_num = key_split[3]
                dim = config.vision_config.hidden_size
                if "message_attn" in key:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.weight"] = val[:dim, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.weight"] = val[dim : dim * 2, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.weight"] = val[-dim:, :]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.bias"] = val[dim : dim * 2]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.bias"] = val[-dim:]
                else:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            elif key.startswith("mit"):
                layer_num = key_split[2]
                dim = config.vision_config.mit_hidden_size
                if "weight" in key:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            else:
                layer_num = key_split[2]
                dim = config.text_config.hidden_size
                if "weight" in key:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_key_name = rename_key(key)
            # these projections are stored transposed in the original checkpoint
            if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
                val = val.T
            orig_state_dict[new_key_name] = val
    return orig_state_dict
def prepare_video(num_frames):
    """Download the "eating spaghetti" demo clip with the requested number of frames
    and return it as a list of frames."""
    if num_frames == 8:
        filename = "eating_spaghetti_8_frames.npy"
    elif num_frames == 16:
        filename = "eating_spaghetti.npy"
    elif num_frames == 32:
        filename = "eating_spaghetti_32_frames.npy"
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video",
        filename=filename,
        repo_type="dataset",
    )
    video = np.load(file)
    return list(video)
def convert_xclip_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Convert an original X-CLIP checkpoint to the HF format, verify its outputs on a
    demo video, and optionally save/push the result.

    Args:
        model_name: key into the checkpoint-URL table below.
        pytorch_dump_folder_path: if given, save the converted model there.
        push_to_hub: if True, push model, processor and slow tokenizer to the hub.
    """
    model_to_url = {
        # fully supervised kinetics-400 checkpoints
        "xclip-base-patch32": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth",
        "xclip-base-patch32-16-frames": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth"
        ),
        "xclip-base-patch16": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth",
        "xclip-base-patch16-16-frames": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth"
        ),
        "xclip-large-patch14": "https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb",
        "xclip-large-patch14-16-frames": "https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f",
        # fully supervised kinetics-600 checkpoints
        "xclip-base-patch16-kinetics-600": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth"
        ),
        "xclip-base-patch16-kinetics-600-16-frames": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth"
        ),
        "xclip-large-patch14-kinetics-600": "https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be",
        # few shot
        "xclip-base-patch16-hmdb-2-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth"
        ),
        "xclip-base-patch16-hmdb-4-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth"
        ),
        "xclip-base-patch16-hmdb-8-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth"
        ),
        "xclip-base-patch16-hmdb-16-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth"
        ),
        "xclip-base-patch16-ucf-2-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth"
        ),
        "xclip-base-patch16-ucf-4-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth"
        ),
        "xclip-base-patch16-ucf-8-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth"
        ),
        "xclip-base-patch16-ucf-16-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth"
        ),
        # zero shot
        "xclip-base-patch16-zero-shot": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth",
    }
    checkpoint_url = model_to_url[model_name]
    num_frames = 8
    if "16-frames" in model_name:
        num_frames = 16
    elif "shot" in model_name:
        num_frames = 32

    config = get_xclip_config(model_name, num_frames)
    model = XCLIPModel(config)
    model.eval()

    if "drive" in checkpoint_url:
        # Google Drive checkpoints cannot be streamed by torch.hub
        output = "pytorch_model.bin"
        gdown.cached_download(checkpoint_url, output, quiet=False)
        state_dict = torch.load(output, map_location="cpu")["model"]
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)["model"]

    state_dict = convert_state_dict(state_dict, config)

    model = XCLIPModel(config)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    # only the (buffer) position ids are expected to be missing
    assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
    model.eval()

    size = 336 if model_name == "xclip-large-patch14-16-frames" else 224
    image_processor = VideoMAEImageProcessor(size=size)
    slow_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
    fast_tokenizer = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32")
    processor = XCLIPProcessor(image_processor=image_processor, tokenizer=fast_tokenizer)

    video = prepare_video(num_frames)
    inputs = processor(
        text=["playing sports", "eating spaghetti", "go shopping"], videos=video, return_tensors="pt", padding=True
    )
    print("Shape of pixel values:", inputs.pixel_values.shape)

    with torch.no_grad():
        outputs = model(**inputs)

    # Verify outputs
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1)
    print("Probs:", probs)
    # kinetics-400
    if model_name == "xclip-base-patch32":
        expected_probs = torch.tensor([[0.0019, 0.9951, 0.0030]])
    elif model_name == "xclip-base-patch32-16-frames":
        expected_probs = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]])
    elif model_name == "xclip-base-patch16":
        expected_probs = torch.tensor([[0.0083, 0.9681, 0.0236]])
    elif model_name == "xclip-base-patch16-16-frames":
        expected_probs = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]])
    elif model_name == "xclip-large-patch14":
        expected_probs = torch.tensor([[0.0062, 0.9864, 0.0075]])
    elif model_name == "xclip-large-patch14-16-frames":
        expected_probs = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]])
    # kinetics-600
    elif model_name == "xclip-base-patch16-kinetics-600":
        expected_probs = torch.tensor([[0.0555, 0.8914, 0.0531]])
    elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
        expected_probs = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]])
    elif model_name == "xclip-large-patch14-kinetics-600":
        expected_probs = torch.tensor([[0.0036, 0.9920, 0.0045]])
    # few shot
    elif model_name == "xclip-base-patch16-hmdb-2-shot":
        expected_probs = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]])
    elif model_name == "xclip-base-patch16-hmdb-4-shot":
        expected_probs = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]])
    elif model_name == "xclip-base-patch16-hmdb-8-shot":
        expected_probs = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]])
    elif model_name == "xclip-base-patch16-hmdb-16-shot":
        expected_probs = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]])
    elif model_name == "xclip-base-patch16-ucf-2-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-4-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-8-shot":
        expected_probs = torch.tensor([[0.0027, 0.9904, 0.0070]])
    elif model_name == "xclip-base-patch16-ucf-16-shot":
        expected_probs = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]])
    # zero shot
    elif model_name == "xclip-base-patch16-zero-shot":
        expected_probs = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]])
    else:
        raise ValueError(f"Model name {model_name} not supported")
    assert torch.allclose(probs, expected_probs, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model, processor and slow tokenizer files to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
        processor.push_to_hub(model_name, organization="nielsr")
        slow_tokenizer.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="xclip-base-patch32",
        type=str,
        help="Name of the model.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    args = parser.parse_args()
    convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 463 |
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class _SCREAMING_SNAKE_CASE(ProcessorMixin):
    """
    Processor wrapping an MCTCT feature extractor and a tokenizer into a single object.

    `__call__` forwards audio to the feature extractor and text to the tokenizer;
    when both are given, the tokenized text is attached as `labels`. The deprecated
    `as_target_processor` context manager temporarily routes calls to the tokenizer.
    """

    # attribute names required by ProcessorMixin to resolve the two sub-processors
    feature_extractor_class = "MCTCTFeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        """Prepare audio and/or text inputs; with both, attach text ids as `labels`."""
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def pad(self, *args, **kwargs):
        """Pad audio `input_features` and/or text `labels`; with both, attach label ids."""
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)
        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]
        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        """Deprecated: temporarily make the tokenizer the active processor for labels."""
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
| 463 | 1 |
'''simple docstring'''
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
__lowerCamelCase : Optional[int] = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
    """Builds small Autoformer configs/inputs and checks that the standalone
    encoder/decoder reproduce the full model's hidden states.

    NOTE(review): renamed from the mangled `_lowercase` — the test class below
    already instantiates it as `AutoformerModelTester`.
    """

    def __init__(
        self,
        parent,
        d_model=16,
        batch_size=13,
        prediction_length=7,
        context_length=14,
        label_length=10,
        cardinality=19,
        embedding_dimension=5,
        num_time_features=4,
        is_training=True,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        lags_sequence=[1, 2, 3, 4, 5],
        moving_average=25,
        autocorrelation_factor=5,
    ):
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor

    def get_config(self):
        """Return a small AutoformerConfig built from the tester hyper-parameters."""
        return AutoformerConfig(
            d_model=self.d_model,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            prediction_length=self.prediction_length,
            context_length=self.context_length,
            label_length=self.label_length,
            lags_sequence=self.lags_sequence,
            num_time_features=self.num_time_features,
            num_static_categorical_features=1,
            cardinality=[self.cardinality],
            embedding_dimension=[self.embedding_dimension],
            moving_average=self.moving_average,
        )

    def prepare_autoformer_inputs_dict(self, config):
        """Build random encoder/decoder inputs matching the given config."""
        # the model also consumes `lags_sequence` past steps beyond the context window
        _past_length = config.context_length + max(config.lags_sequence)
        static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0])
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features])
        past_values = floats_tensor([self.batch_size, _past_length])
        past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5
        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
        future_values = floats_tensor([self.batch_size, config.prediction_length])
        inputs_dict = {
            "past_values": past_values,
            "static_categorical_features": static_categorical_features,
            "past_time_features": past_time_features,
            "past_observed_mask": past_observed_mask,
            "future_time_features": future_time_features,
            "future_values": future_values,
        }
        return inputs_dict

    def prepare_config_and_inputs(self):
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        """Save/reload encoder and decoder separately and verify their outputs match
        the full model's hidden states."""
        model = AutoformerModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)
        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state
        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = AutoformerEncoder.from_pretrained(tmpdirname).to(torch_device)
        transformer_inputs, feature, _, _, _ = model.create_network_inputs(**inputs_dict)
        seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...])
        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]),
            dim=-1,
        )
        encoder_last_hidden_state_a = encoder(inputs_embeds=enc_input)[0]
        self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3)
        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...], dim=1)
            .unsqueeze(1)
            .repeat(1, config.prediction_length, 1)
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]],
            device=enc_input.device,
        )
        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )
        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = AutoformerDecoder.from_pretrained(tmpdirname).to(torch_device)
        last_hidden_state_a = decoder(
            trend=trend_init,
            inputs_embeds=dec_input,
            encoder_hidden_states=encoder_last_hidden_state,
        )[0]
        self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class _lowercase ( __snake_case , __snake_case , unittest.TestCase ):
    # Unit-test suite for the Autoformer time-series model, built on the common
    # model-test mixins.
    # NOTE(review): identifiers such as `_lowercase`, `_a`, `snake_case__` and
    # `lowercase__` look machine-mangled: `_lowercase` is simultaneously this
    # class's name and an argument placeholder inside method bodies, every class
    # attribute is assigned to the same name `_a` (each assignment clobbers the
    # previous one), and every method is named `lowercase__` (later defs shadow
    # earlier ones). Confirm against the original Autoformer test file before
    # relying on any behavior here.

    # Model classes exercised by the common tests (empty tuples when torch is absent).
    _a : Any = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    _a : List[Any] = (AutoformerForPrediction,) if is_torch_available() else ()
    _a : Union[str, Any] = {'feature-extraction': AutoformerModel} if is_torch_available() else {}
    # Feature flags consumed by the common test mixin (all disabled here).
    _a : Tuple = False
    _a : Any = False
    _a : int = False
    _a : Dict = False
    _a : List[Any] = False
    _a : int = False

    def lowercase__ ( self ):
        # Set up the shared model-tester and config-tester fixtures.
        snake_case__ : Dict =AutoformerModelTester(self )
        snake_case__ : Dict =ConfigTester(self , config_class=_lowercase , has_text_modality=_lowercase )

    def lowercase__ ( self ):
        # Run the generic configuration sanity checks.
        self.config_tester.run_common_tests()

    def lowercase__ ( self ):
        # Round-trip every model class through save_pretrained/from_pretrained
        # and assert that no weights are reported missing.
        snake_case__ : List[str] =self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            snake_case__ : Optional[Any] =model_class(_lowercase )
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(_lowercase )
                snake_case__ : str =model_class.from_pretrained(_lowercase , output_loading_info=_lowercase )
            self.assertEqual(info["""missing_keys"""] , [] )

    def lowercase__ ( self ):
        # Delegates the encoder/decoder standalone consistency check to the tester.
        snake_case__ : Dict =self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*_lowercase )

    @unittest.skip(reason="""Model has no tokens embeddings""" )
    def lowercase__ ( self ):
        # Token-embedding resize tests do not apply to this model.
        pass

    def lowercase__ ( self ):
        # The declared main_input_name must match the first forward() argument
        # after `self`.
        snake_case__ : Tuple =inspect.signature(getattr(_lowercase , """forward""" ) )
        # The main input is the name of the argument after `self`
        snake_case__ : str =list(model_signature.parameters.keys() )[1]
        self.assertEqual(AutoformerModel.main_input_name , _lowercase )

    def lowercase__ ( self ):
        # forward() must expose the documented argument names, in order.
        snake_case__ : Optional[int] =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            snake_case__ : Tuple =model_class(_lowercase )
            snake_case__ : List[str] =inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            snake_case__ : List[Any] =[*signature.parameters.keys()]
            snake_case__ : Dict =[
                """past_values""",
                """past_time_features""",
                """past_observed_mask""",
                """static_categorical_features""",
                """static_real_features""",
                """future_values""",
                """future_time_features""",
            ]
            # The prediction head additionally takes the observed-mask for futures.
            if model.__class__.__name__ in ["AutoformerForPrediction"]:
                expected_arg_names.append("""future_observed_mask""" )
            expected_arg_names.extend(
                [
                    """decoder_attention_mask""",
                    """head_mask""",
                    """decoder_head_mask""",
                    """cross_attn_head_mask""",
                    """encoder_outputs""",
                    """past_key_values""",
                    """output_hidden_states""",
                    """output_attentions""",
                    """use_cache""",
                    """return_dict""",
                ] )
            self.assertListEqual(arg_names[: len(_lowercase )] , _lowercase )

    def lowercase__ ( self ):
        # Exhaustive check of encoder/decoder/cross attention outputs: layer
        # counts, per-head shapes, the config toggle path, and output ordering.
        snake_case__ : Dict =self.model_tester.prepare_config_and_inputs_for_common()
        snake_case__ : Optional[Any] =True
        snake_case__ : Optional[int] =getattr(self.model_tester , """seq_length""" , _lowercase )
        snake_case__ : Optional[int] =getattr(self.model_tester , """decoder_seq_length""" , _lowercase )
        snake_case__ : Optional[int] =getattr(self.model_tester , """encoder_seq_length""" , _lowercase )
        snake_case__ : List[str] =getattr(self.model_tester , """d_model""" , _lowercase )
        snake_case__ : Union[str, Any] =getattr(self.model_tester , """num_attention_heads""" , _lowercase )
        # Per-head dimension of the attention weights.
        snake_case__ : List[Any] =d_model // num_attention_heads
        for model_class in self.all_model_classes:
            snake_case__ : int =True
            snake_case__ : Union[str, Any] =False
            snake_case__ : List[Any] =True
            snake_case__ : int =model_class(_lowercase )
            model.to(_lowercase )
            model.eval()
            with torch.no_grad():
                snake_case__ : int =model(**self._prepare_for_class(_lowercase , _lowercase ) )
            snake_case__ : Optional[Any] =outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(_lowercase ) , self.model_tester.num_hidden_layers )
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            snake_case__ : Optional[int] =True
            snake_case__ : Optional[int] =model_class(_lowercase )
            model.to(_lowercase )
            model.eval()
            with torch.no_grad():
                snake_case__ : List[str] =model(**self._prepare_for_class(_lowercase , _lowercase ) )
            snake_case__ : int =outputs.encoder_attentions
            self.assertEqual(len(_lowercase ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
            snake_case__ : Dict =len(_lowercase )
            # Expected number of outputs; bumped for each optional entry present.
            snake_case__ : int =7
            if "last_hidden_state" in outputs:
                correct_outlen += 1
            if "trend" in outputs:
                correct_outlen += 1
            if "past_key_values" in outputs:
                correct_outlen += 1 # past_key_values have been returned
            if "loss" in outputs:
                correct_outlen += 1
            if "params" in outputs:
                correct_outlen += 1
            self.assertEqual(_lowercase , _lowercase )
            # decoder attentions
            snake_case__ : Dict =outputs.decoder_attentions
            self.assertIsInstance(_lowercase , (list, tuple) )
            self.assertEqual(len(_lowercase ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
            # cross attentions
            snake_case__ : Optional[Any] =outputs.cross_attentions
            self.assertIsInstance(_lowercase , (list, tuple) )
            self.assertEqual(len(_lowercase ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
            # Check attention is always last and order is fine
            snake_case__ : Any =True
            snake_case__ : Any =True
            snake_case__ : List[Any] =model_class(_lowercase )
            model.to(_lowercase )
            model.eval()
            with torch.no_grad():
                snake_case__ : List[str] =model(**self._prepare_for_class(_lowercase , _lowercase ) )
            self.assertEqual(out_len + 2 , len(_lowercase ) )
            snake_case__ : Optional[int] =outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(_lowercase ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )

    @is_flaky()
    def lowercase__ ( self ):
        # Delegates to the (known-flaky) common retain-grad test.
        super().test_retain_grad_hidden_states_attentions()
def prepare_batch(filename="train-batch.pt"):
    """Download a pre-serialized tourism-monthly batch from the Hub and load it.

    The mangled name ``A__`` is restored to ``prepare_batch`` — the call sites
    below (``prepare_batch()`` / ``prepare_batch("val-batch.pt")``) fix the
    intended name, and the body's undefined ``_lowerCamelCase`` placeholders are
    replaced with the actual local variables.

    Args:
        filename: Name of the serialized batch file inside the
            ``hf-internal-testing/tourism-monthly-batch`` dataset repository.

    Returns:
        The deserialized batch (a dict of tensors).
    """
    file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch", filename=filename, repo_type="dataset")
    # NOTE(review): `torch_device` is expected to come from
    # transformers.testing_utils — confirm it is imported at the top of the file
    # (outside this excerpt).
    batch = torch.load(file, map_location=torch_device)
    return batch
@require_torch
@slow
class _lowercase ( unittest.TestCase ):
    # Slow integration tests that run the pretrained
    # `huggingface/autoformer-tourism-monthly` checkpoint on real batches
    # downloaded from the Hub.
    # NOTE(review): `_lowercase` is a mangled placeholder standing in for several
    # distinct values (the torch device, expected shapes/tensors, and the atol
    # tolerance), and all three methods share the mangled name `lowercase__`
    # (later defs shadow earlier ones). Confirm against the original test file.

    def lowercase__ ( self ):
        # Forward pass of the base model on a training batch; checks the output
        # shape and a 3x3 slice of values against golden numbers.
        snake_case__ : Dict =AutoformerModel.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(_lowercase )
        snake_case__ : int =prepare_batch()
        with torch.no_grad():
            snake_case__ : List[Any] =model(
                past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , future_values=batch["""future_values"""] , future_time_features=batch["""future_time_features"""] , )[0]
        snake_case__ : Optional[int] =torch.Size(
            (6_4, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
        self.assertEqual(output.shape , _lowercase )
        snake_case__ : Any =torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] , device=_lowercase )
        self.assertTrue(torch.allclose(output[0, :3, :3] , _lowercase , atol=_lowercase ) )

    def lowercase__ ( self ):
        # Encoder-only inference on a validation batch; checks the encoder's
        # last hidden state shape and a value slice.
        snake_case__ : Optional[int] =AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(_lowercase )
        snake_case__ : List[str] =prepare_batch("""val-batch.pt""" )
        with torch.no_grad():
            snake_case__ : str =model(
                past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , ).encoder_last_hidden_state
        snake_case__ : Optional[int] =torch.Size((6_4, model.config.context_length, model.config.d_model) )
        self.assertEqual(output.shape , _lowercase )
        snake_case__ : str =torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] , device=_lowercase )
        self.assertTrue(torch.allclose(output[0, :3, :3] , _lowercase , atol=_lowercase ) )

    def lowercase__ ( self ):
        # Autoregressive generation on a validation batch; compares the tail of
        # the mean prediction against golden values with a loose tolerance.
        snake_case__ : Tuple =AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(_lowercase )
        snake_case__ : Optional[Any] =prepare_batch("""val-batch.pt""" )
        with torch.no_grad():
            snake_case__ : Tuple =model.generate(
                static_categorical_features=batch["""static_categorical_features"""] , past_time_features=batch["""past_time_features"""] , past_values=batch["""past_values"""] , future_time_features=batch["""future_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , )
        snake_case__ : str =torch.Size((6_4, model.config.num_parallel_samples, model.config.prediction_length) )
        self.assertEqual(outputs.sequences.shape , _lowercase )
        snake_case__ : List[Any] =torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=_lowercase )
        snake_case__ : List[Any] =outputs.sequences.mean(dim=1 )
        self.assertTrue(torch.allclose(mean_prediction[0, -3:] , _lowercase , rtol=1e-1 ) )
| 719 |
from collections import namedtuple
import requests
from lxml import html # type: ignore
# The function body below reads `covid_data`, so the namedtuple must be bound
# under that name (the mangled original assigned it to `__lowerCamelCase`).
covid_data = namedtuple("covid_data", "cases deaths recovered")


def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    """Scrape worldometers.info for world-wide COVID-19 totals.

    Args:
        url: Page to scrape; defaults to the worldometers coronavirus page.

    Returns:
        A ``covid_data`` namedtuple with total cases, deaths and recoveries.
    """
    # The three big counters on the page all live in this div. The original code
    # mistakenly passed the URL (not this XPath expression) to `.xpath(...)`.
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))


if __name__ == "__main__":
    # Guarded so that importing this module does not trigger a network request.
    fmt = """Total COVID-19 cases in the world: {}
Total deaths due to COVID-19 in the world: {}
Total COVID-19 patients recovered in the world: {}"""
    print(fmt.format(*covid_stats()))
| 448 | 0 |
def move_tower(height, from_pole, to_pole, with_pole):
    """Recursively solve Towers of Hanoi, printing each disk move.

    All three functions in this script were mangled to the same name
    ``lowercase__`` (so later defs shadowed earlier ones) while the bodies call
    ``move_tower``/``move_disk``/``main`` — the call sites fix the real names.

    Args:
        height: Number of disks to move (no-op when < 1).
        from_pole: Label of the source pole.
        to_pole: Label of the destination pole.
        with_pole: Label of the auxiliary pole.
    """
    if height >= 1:
        # Move the top (height - 1) disks out of the way, move the largest
        # disk, then stack the smaller ones back on top of it.
        move_tower(height - 1, from_pole, with_pole, to_pole)
        move_disk(from_pole, to_pole)
        move_tower(height - 1, with_pole, to_pole, from_pole)


def move_disk(fp, tp):
    """Print a single disk move from pole ``fp`` to pole ``tp``."""
    print("moving disk from", fp, "to", tp)


def main():
    """Read the tower height from stdin and solve the puzzle from A to B."""
    height = int(input("Height of hanoi: ").strip())
    move_tower(height, "A", "B", "C")


if __name__ == "__main__":
    main()
| 280 |
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate the polynomial with coefficients ``poly`` (lowest degree first)
    at ``x`` by direct summation of c_i * x**i.

    Both functions here were mangled to the same name ``lowercase__`` while the
    demo code calls ``evaluate_poly`` and ``horner`` — the call sites fix the
    intended names.
    """
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate the same polynomial using Horner's method (O(n) multiplies).

    Iterates coefficients from highest degree down, folding
    ``result = result * x + coeff`` at each step.
    """
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result


if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
| 280 | 1 |
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_mobilenet_va_config(model_name):
    """Build a MobileNetV1 config whose depth multiplier and image size are
    parsed from the checkpoint name (e.g. ``mobilenet_v1_1.0_224``).

    The function/parameter names were mangled (``lowerCamelCase_`` / ``a_``);
    the call site ``get_mobilenet_va_config(...)`` below fixes the real name,
    and the id2label comprehension's ``int(a_) + 1`` is corrected to
    ``int(k) + 1``.
    """
    config = MobileNetVaConfig(layer_norm_eps=0.001)

    if "_quant" in model_name:
        raise ValueError("Quantized models are not supported.")

    matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])

    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1_001
    filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    # Shift every ImageNet id by one to make room for the "background" class.
    id2label = {int(k) + 1: v for k, v in id2label.items()}
    id2label[0] = "background"
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def prepare_img():
    """Fetch the standard COCO cats image used to sanity-check conversions."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_movilevit_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """Convert a TensorFlow MobileNetV1 checkpoint to the HF format, verify its
    logits on a sample image, save it, and optionally push it to the Hub.

    Args:
        model_name: Checkpoint name of the form ``mobilenet_v1_<depth>_<size>``.
        checkpoint_path: Path to the original TensorFlow ``.ckpt`` file.
        pytorch_dump_folder_path: Output directory for the converted model.
        push_to_hub: When True, also upload model and processor to ``google/<model_name>``.
    """
    config = get_mobilenet_va_config(model_name)

    # Load 🤗 model
    model = MobileNetVaForImageClassification(config).eval()

    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_va(model, config, checkpoint_path)

    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetVaImageProcessor(
        crop_size={"width": config.image_size, "height": config.image_size},
        size={"shortest_edge": config.image_size + 32},
    )
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    assert logits.shape == (1, 1_001)

    # Golden logits for the two reference checkpoints.
    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333])
    else:
        expected_logits = None

    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        repo_id = "google/" + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="mobilenet_v1_1.0_224",
        type=str,
        help="Name of the MobileNetV1 model you'd like to convert. Should in the form 'mobilenet_v1_<depth>_<size>'.",
    )
    parser.add_argument(
        "--checkpoint_path", required=True, type=str, help="Path to the original TensorFlow checkpoint (.ckpt file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    args = parser.parse_args()
    convert_movilevit_checkpoint(
        args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
| 713 | """simple docstring"""
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray) -> bool:
    """Return True when ``matrix`` equals its own conjugate transpose.

    (The mangled original named its parameter ``__SCREAMING_SNAKE_CASE`` while
    the body read ``matrix`` — restoring the parameter name fixes the NameError.)
    """
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Compute the Rayleigh quotient (v* A v) / (v* v) for column vector ``v``.

    Args:
        a: A Hermitian matrix.
        v: A non-zero column vector (shape (n, 1)).

    Returns:
        The Rayleigh quotient as a 1x1 array.
    """
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    """Smoke tests: one complex and one real Hermitian example."""
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    tests()
| 635 | 0 |
'''simple docstring'''
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import fa_score
import datasets
# Metric metadata strings. The mangled original assigned all three to the same
# name `_UpperCamelCase`, while the Metric class below reads `_CITATION`,
# `_DESCRIPTION` and `_KWARGS_DESCRIPTION` — restore the required names.
_CITATION = '\\n @inproceedings{kakwani2020indicnlpsuite,\n title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n year={2020},\n booktitle={Findings of EMNLP},\n}\n'

_DESCRIPTION = '\\n IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n'

_KWARGS_DESCRIPTION = '\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n predictions: list of predictions to score (as int64),\n except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32).\n references: list of ground truth labels corresponding to the predictions (as int64),\n except for \'cvit-mkb-clsr\' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n "accuracy": Accuracy\n "f1": F1 score\n "precision": Precision@10\nExamples:\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\') # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\')\n >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'precision@10\': 1.0}\n\n'
def simple_accuracy(preds, labels):
    """Fraction of predictions that exactly match the labels.

    All three helpers here were mangled to the same name ``__snake_case`` while
    the Metric class calls ``simple_accuracy`` / ``acc_and_fa`` /
    ``precision_at_aa`` — the call sites fix the intended names.
    """
    return float((preds == labels).mean())


def acc_and_fa(preds, labels):
    """Accuracy plus (binary/micro) F1 score, as a dict."""
    acc = simple_accuracy(preds, labels)
    fa = float(fa_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": fa,
    }


def precision_at_aa(en_sentvecs, in_sentvecs):
    """Precision@10 for cross-lingual sentence retrieval.

    For each (mean-centered) English sentence vector, ranks all Indian-language
    vectors by cosine distance and counts a hit when the aligned vector (same
    row index) appears in the top 10.
    """
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]
    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)
    sim = cdist(en_sentvecs, in_sentvecs, "cosine")
    actual = np.array(range(n))
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class _lowercase(datasets.Metric):  # NOTE(review): upstream names this class `IndicGlue`
    """IndicGLUE evaluation metric.

    Dispatches on ``self.config_name``: ``cvit-mkb-clsr`` reports precision@10,
    ``wiki-ner`` reports accuracy + F1, and every other subset reports plain
    accuracy.

    The mangled original named both hook methods ``snake_case`` (so the second
    shadowed the first) and declared the compute method with two parameters both
    called ``a`` — a SyntaxError. The ``datasets.Metric`` hook names ``_info``
    and ``_compute`` are restored, with distinct parameter names.
    """

    def _info(self):
        # Validate the subset name and declare the input feature schema:
        # int64 labels everywhere except cvit-mkb-clsr, which takes float vectors.
        if self.config_name not in [
            "wnli",
            "copa",
            "sna",
            "csqa",
            "wstp",
            "inltkh",
            "bbca",
            "cvit-mkb-clsr",
            "iitp-mr",
            "iitp-pr",
            "actsa-sc",
            "md",
            "wiki-ner",
        ]:
            raise KeyError(
                'You should supply a configuration name selected in '
                '["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
                '"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
                '"wiki-ner"]'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64")
                    if self.config_name != "cvit-mkb-clsr"
                    else datasets.Sequence(datasets.Value("float32")),
                    "references": datasets.Value("int64")
                    if self.config_name != "cvit-mkb-clsr"
                    else datasets.Sequence(datasets.Value("float32")),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy" if self.config_name != "cvit-mkb-clsr" else None,
        )

    def _compute(self, predictions, references):
        # Route to the helper matching the configured subset.
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_aa(predictions, references)}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_fa(predictions, references)
        elif self.config_name in [
            "wnli",
            "copa",
            "sna",
            "csqa",
            "wstp",
            "inltkh",
            "bbca",
            "iitp-mr",
            "iitp-pr",
            "actsa-sc",
            "md",
        ]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                'You should supply a configuration name selected in '
                '["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
                '"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
                '"wiki-ner"]'
            )
| 396 | '''simple docstring'''
import argparse
from collections import defaultdict
import yaml
_UpperCamelCase : int = 'docs/source/en/_toctree.yml'
def __snake_case ( lowerCAmelCase : Union[str, Any] ):
__UpperCAmelCase = defaultdict(lowerCAmelCase )
__UpperCAmelCase = []
__UpperCAmelCase = []
for doc in doc_list:
if "local" in doc:
counts[doc["local"]] += 1
if doc["title"].lower() == "overview":
overview_doc.append({'local': doc['local'], 'title': doc['title']} )
else:
new_doc_list.append(lowerCAmelCase )
__UpperCAmelCase = new_doc_list
__UpperCAmelCase = [key for key, value in counts.items() if value > 1]
__UpperCAmelCase = []
for duplicate_key in duplicates:
__UpperCAmelCase = list({doc['title'] for doc in doc_list if doc['local'] == duplicate_key} )
if len(lowerCAmelCase ) > 1:
raise ValueError(
F"""{duplicate_key} is present several times in the documentation table of content at """
'`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
'others.' )
# Only add this once
new_doc.append({'local': duplicate_key, 'title': titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in doc_list if 'local' not in counts or counts[doc['local']] == 1] )
__UpperCAmelCase = sorted(lowerCAmelCase , key=lambda lowerCAmelCase : s["title"].lower() )
# "overview" gets special treatment and is always first
if len(lowerCAmelCase ) > 1:
raise ValueError('{doc_list} has two \'overview\' docs which is not allowed.' )
overview_doc.extend(lowerCAmelCase )
# Sort
return overview_doc
def __snake_case ( lowerCAmelCase : Union[str, Any]=False ):
with open(lowerCAmelCase , encoding='utf-8' ) as f:
__UpperCAmelCase = yaml.safe_load(f.read() )
# Get to the API doc
__UpperCAmelCase = 0
while content[api_idx]["title"] != "API":
api_idx += 1
__UpperCAmelCase = content[api_idx]['sections']
# Then to the model doc
__UpperCAmelCase = 0
while api_doc[scheduler_idx]["title"] != "Schedulers":
scheduler_idx += 1
__UpperCAmelCase = api_doc[scheduler_idx]['sections']
__UpperCAmelCase = clean_doc_toc(lowerCAmelCase )
__UpperCAmelCase = False
if new_scheduler_doc != scheduler_doc:
__UpperCAmelCase = True
if overwrite:
__UpperCAmelCase = new_scheduler_doc
if diff:
if overwrite:
__UpperCAmelCase = api_doc
with open(lowerCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(yaml.dump(lowerCAmelCase , allow_unicode=lowerCAmelCase ) )
else:
raise ValueError(
'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' )
def __snake_case ( lowerCAmelCase : Tuple=False ):
with open(lowerCAmelCase , encoding='utf-8' ) as f:
__UpperCAmelCase = yaml.safe_load(f.read() )
# Get to the API doc
__UpperCAmelCase = 0
while content[api_idx]["title"] != "API":
api_idx += 1
__UpperCAmelCase = content[api_idx]['sections']
# Then to the model doc
__UpperCAmelCase = 0
while api_doc[pipeline_idx]["title"] != "Pipelines":
pipeline_idx += 1
__UpperCAmelCase = False
__UpperCAmelCase = api_doc[pipeline_idx]['sections']
__UpperCAmelCase = []
# sort sub pipeline docs
for pipeline_doc in pipeline_docs:
if "section" in pipeline_doc:
__UpperCAmelCase = pipeline_doc['section']
__UpperCAmelCase = clean_doc_toc(lowerCAmelCase )
if overwrite:
__UpperCAmelCase = new_sub_pipeline_doc
new_pipeline_docs.append(lowerCAmelCase )
# sort overall pipeline doc
__UpperCAmelCase = clean_doc_toc(lowerCAmelCase )
if new_pipeline_docs != pipeline_docs:
__UpperCAmelCase = True
if overwrite:
__UpperCAmelCase = new_pipeline_docs
if diff:
if overwrite:
__UpperCAmelCase = api_doc
with open(lowerCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(yaml.dump(lowerCAmelCase , allow_unicode=lowerCAmelCase ) )
else:
raise ValueError(
'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' )
if __name__ == "__main__":
_UpperCamelCase : List[str] = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
_UpperCamelCase : Union[str, Any] = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
| 396 | 1 |
from __future__ import annotations
import requests
def get_hackernews_story(story_id: int) -> dict:
    """Fetch a single Hacker News item by id from the Firebase API.

    (All three functions here were mangled to the same name ``lowerCamelCase_``
    with undefined ``__SCREAMING_SNAKE_CASE`` placeholders in the bodies; the
    internal call sites fix the intended names.)
    """
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    """Return the top ``max_stories`` posts from https://news.ycombinator.com/."""
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    """Render the top stories as a markdown bullet list of titled links."""
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)


if __name__ == "__main__":
    print(hackernews_top_stories_as_markdown())
| 704 |
import requests
# The function bodies below read `URL_BASE` and default to `APPID`, so both
# constants must be bound under those names (the mangled original assigned
# both to `__A`, so the second clobbered the first).
APPID = ""  # <-- Put your OpenWeatherMap appid here!
URL_BASE = "https://api.openweathermap.org/data/2.5/"


def current_weather(q: str = "Chicago", appid: str = APPID) -> dict:
    """Current conditions for a location.

    The parameter names double as the OpenWeatherMap query parameters because
    ``locals()`` is sent as the request's params — so ``q``/``appid`` are
    functionally required names, not just style.
    """
    return requests.get(URL_BASE + "weather", params=locals()).json()


def weather_forecast(q: str = "Kolkata, India", appid: str = APPID) -> dict:
    """Multi-day forecast for a location (same ``locals()`` trick as above)."""
    return requests.get(URL_BASE + "forecast", params=locals()).json()


def weather_onecall(lat: float = 55.68, lon: float = 12.57, appid: str = APPID) -> dict:
    """One-call weather data for a latitude/longitude pair."""
    return requests.get(URL_BASE + "onecall", params=locals()).json()


if __name__ == "__main__":
    from pprint import pprint

    while True:
        location = input("Enter a location:").strip()
        if location:
            pprint(current_weather(location))
        else:
            break
"""simple docstring"""
# Version-pin table for this package's dependencies, keyed by pip package name.
# In the upstream project this table is auto-generated (`make deps_table_update`)
# from setup.py — edit it there, not here.
# NOTE(review): the variable name `_lowercase` looks machine-mangled; upstream
# calls this dict `deps`. Confirm against the original
# `dependency_versions_table.py` before renaming.
_lowercase : str = {
    'Pillow': 'Pillow<10.0.0',
    'accelerate': 'accelerate>=0.20.3',
    'av': 'av==9.2.0',
    'beautifulsoup4': 'beautifulsoup4',
    'black': 'black~=23.1',
    'codecarbon': 'codecarbon==1.2.0',
    'cookiecutter': 'cookiecutter==1.7.3',
    'dataclasses': 'dataclasses',
    'datasets': 'datasets!=2.5.0',
    'decord': 'decord==0.6.0',
    'deepspeed': 'deepspeed>=0.9.3',
    'diffusers': 'diffusers',
    'dill': 'dill<0.3.5',
    'evaluate': 'evaluate>=0.2.0',
    'fairscale': 'fairscale>0.3',
    'faiss-cpu': 'faiss-cpu',
    'fastapi': 'fastapi',
    'filelock': 'filelock',
    'flax': 'flax>=0.4.1,<=0.7.0',
    'ftfy': 'ftfy',
    'fugashi': 'fugashi>=1.0',
    'GitPython': 'GitPython<3.1.19',
    'hf-doc-builder': 'hf-doc-builder>=0.3.0',
    'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0',
    'importlib_metadata': 'importlib_metadata',
    'ipadic': 'ipadic>=1.0.0,<2.0',
    'isort': 'isort>=5.5.4',
    'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13',
    'jaxlib': 'jaxlib>=0.1.65,<=0.4.13',
    'jieba': 'jieba',
    'kenlm': 'kenlm',
    'keras-nlp': 'keras-nlp>=0.3.1',
    'librosa': 'librosa',
    'nltk': 'nltk',
    'natten': 'natten>=0.14.6',
    'numpy': 'numpy>=1.17',
    'onnxconverter-common': 'onnxconverter-common',
    'onnxruntime-tools': 'onnxruntime-tools>=1.4.2',
    'onnxruntime': 'onnxruntime>=1.4.0',
    'opencv-python': 'opencv-python',
    'optuna': 'optuna',
    'optax': 'optax>=0.0.8,<=0.1.4',
    'packaging': 'packaging>=20.0',
    'parameterized': 'parameterized',
    'phonemizer': 'phonemizer',
    'protobuf': 'protobuf',
    'psutil': 'psutil',
    'pyyaml': 'pyyaml>=5.1',
    'pydantic': 'pydantic<2',
    'pytest': 'pytest>=7.2.0',
    'pytest-timeout': 'pytest-timeout',
    'pytest-xdist': 'pytest-xdist',
    'python': 'python>=3.8.0',
    'ray[tune]': 'ray[tune]',
    'regex': 'regex!=2019.12.17',
    'requests': 'requests',
    'rhoknp': 'rhoknp>=1.1.0,<1.3.1',
    'rjieba': 'rjieba',
    'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1',
    'ruff': 'ruff>=0.0.241,<=0.0.259',
    'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0',
    'sacremoses': 'sacremoses',
    'safetensors': 'safetensors>=0.3.1',
    'sagemaker': 'sagemaker>=2.31.0',
    'scikit-learn': 'scikit-learn',
    'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
    'sigopt': 'sigopt',
    'starlette': 'starlette',
    'sudachipy': 'sudachipy>=0.6.6',
    'sudachidict_core': 'sudachidict_core>=20220729',
    'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14',
    'tensorflow': 'tensorflow>=2.6,<2.14',
    'tensorflow-text': 'tensorflow-text<2.14',
    'tf2onnx': 'tf2onnx',
    'timeout-decorator': 'timeout-decorator',
    'timm': 'timm',
    'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14',
    'torch': 'torch>=1.9,!=1.12.0',
    'torchaudio': 'torchaudio',
    'torchvision': 'torchvision',
    'pyctcdecode': 'pyctcdecode>=0.4.0',
    'tqdm': 'tqdm>=4.27',
    'unidic': 'unidic>=1.0.2',
    'unidic_lite': 'unidic_lite>=1.0.7',
    'urllib3': 'urllib3<2.0.0',
    'uvicorn': 'uvicorn',
}
| 49 |
'''simple docstring'''
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits: int = 3):
    """Build and simulate an n-qubit Quantum Fourier Transform circuit.

    The mangled original wrote ``isinstance(x, x)`` for the type check (which
    always raises ``TypeError: isinstance() arg 2``) and named the function
    ``lowerCamelCase`` while the ``__main__`` block calls
    ``quantum_fourier_transform`` — both are restored here.

    Args:
        number_of_qubits: Circuit width; must be an exact integer in [1, 10].

    Returns:
        The measurement counts of a 10000-shot qasm simulation.

    Raises:
        TypeError: if ``number_of_qubits`` is a string.
        ValueError: if it is non-positive, fractional, or larger than 10.
    """
    if isinstance(number_of_qubits, str):
        raise TypeError("number of qubits must be a integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10).")

    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")
    quantum_circuit = QuantumCircuit(qr, cr)

    counter = number_of_qubits
    for i in range(number_of_qubits):
        # Hadamard on the highest remaining qubit, then controlled phase
        # rotations of pi / 2^(counter - j) against the lower qubits.
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    # Reverse the qubit order with swaps, as the QFT output is bit-reversed.
    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10_000)

    return job.result().get_counts(quantum_circuit)


if __name__ == "__main__":
    print(
        f"""Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"""
    )
| 476 | 0 |
"""simple docstring"""
import os
def solution(names_file=None):
    """Project Euler 22: total of all name scores in the names file.

    A name's score is the sum of its letters' alphabetical values (A=1) times
    its 1-based position in the alphabetically sorted list. The mangled
    original referenced an undefined ``_A`` where the file path, the name list
    and the current letter belonged; the guard below calls ``solution``, fixing
    the function's real name. The file path is now a parameter (defaulting to
    the original module-relative location) so the function is testable.

    Args:
        names_file: Optional path to a comma-separated, double-quoted name
            list; defaults to ``p022_names.txt`` next to this module.

    Returns:
        The total score as an int.
    """
    if names_file is None:
        names_file = os.path.dirname(__file__) + "/p022_names.txt"
    with open(names_file) as file:
        names = str(file.readlines()[0])
        names = names.replace('"', "").split(",")

    names.sort()

    name_score = 0
    total_score = 0
    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64
        total_score += (i + 1) * name_score
        # Reset the accumulator for the next name.
        name_score = 0
    return total_score


if __name__ == "__main__":
    print(solution())
| 705 |
"""simple docstring"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Lazy-import structure for the WavLM model package. The mangled original
# assigned both the dict and the model list to the same variable
# (`UpperCAmelCase`, so the list clobbered the dict) and then passed an
# undefined `_import_structure` to `_LazyModule`; the canonical transformers
# __init__ layout is restored here.
_import_structure = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # torch is present: register the modeling symbols for lazy import.
    _import_structure["modeling_wavlm"] = [
        "WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "WavLMForAudioFrameClassification",
        "WavLMForCTC",
        "WavLMForSequenceClassification",
        "WavLMForXVector",
        "WavLMModel",
        "WavLMPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wavlm import (
            WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            WavLMForAudioFrameClassification,
            WavLMForCTC,
            WavLMForSequenceClassification,
            WavLMForXVector,
            WavLMModel,
            WavLMPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 100 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
# Emit INFO-level logs during conversion.
logging.set_verbosity_info()
# Module-level logger (bound to a mangled name; nothing in this script reads it).
UpperCAmelCase_ : List[Any] = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    """Build a ``WavaVecaForSequenceClassification`` model and copy the
    S3PRL downstream head weights (projector + post-net linear) into it.

    Named/reconstructed to match the dispatcher below, which calls
    ``convert_classification`` (the original placeholder name repeated one
    parameter name three times, which is a SyntaxError).
    """
    model = WavaVecaForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model
def convert_diarization(base_model_name, hf_config, downstream_dict):
    """Build a ``WavaVecaForAudioFrameClassification`` (diarization) model
    and copy the S3PRL downstream linear classifier weights into it.
    """
    model = WavaVecaForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model
def convert_xvector(base_model_name, hf_config, downstream_dict):
    """Build a ``WavaVecaForXVector`` speaker-verification model and copy
    the S3PRL downstream weights (connector, TDNN stack, utterance-level
    linears and the objective matrix) into it.
    """
    model = WavaVecaForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        # NOTE(review): upstream squeezes the trailing conv dim of the S3PRL
        # kernel weight; confirm against the checkpoint layout.
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ].squeeze(-1)
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]
    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model
@torch.no_grad()
def convert_saprl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    """Convert an S3PRL downstream checkpoint into a Hugging Face model.

    Loads the checkpoint, dispatches on the architecture declared in the
    config, copies the downstream weights, and saves model + feature
    extractor to ``model_dump_path``. (Named to match the CLI call below.)
    """
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]

    hf_config = WavaVecaConfig.from_pretrained(config_path)
    hf_feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        # The featurizer's layer weights feed the weighted layer sum.
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
    # CLI entry point. (The original bound the parser and the parsed args
    # to throwaway placeholder names, so `parser` and `args` were undefined.)
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
    )
    parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
    parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
    args = parser.parse_args()
    convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 24 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
# Lazy-import structure: submodule name -> public names it provides.
# (The original rebound one placeholder name to the dict and then to the
# tokenizer lists, so `_import_structure` at the bottom was a NameError.)
_import_structure = {"processing_layoutxlm": ["LayoutXLMProcessor"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutxlm"] = ["LayoutXLMTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutxlm_fast"] = ["LayoutXLMTokenizerFast"]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .processing_layoutxlm import LayoutXLMProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm import LayoutXLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
    import sys

    # Replace this module with a lazy proxy so submodules load on first use.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 238 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__snake_case = logging.get_logger(__name__)
class _a ( __a ):
    """ConvNeXT-style image processor: optional shortest-edge resize with a
    crop-percentage center crop below 384px, rescale and normalize.

    Reconstructed: the original repeated one placeholder parameter name in
    every signature (a SyntaxError), discarded the ``self.`` assignments,
    and gave all four methods the same name — while the body calls
    ``self.resize`` / ``self.rescale`` / ``self.normalize``.
    """

    A_ = ["pixel_values"]  # NOTE(review): presumably `model_input_names` upstream.

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ):
        """Store the preprocessing defaults used by :meth:`preprocess`."""
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)
        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: float,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Resize an image. Below 384px the shortest edge is scaled by
        1/crop_pct and then center-cropped; at 384px or more the image is
        warped to a square without cropping.
        """
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]

        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            # `resize` here is the module-level transform, not this method.
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs
            )

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Rescale pixel values by `scale` (typically 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Normalize an image with the given per-channel mean and std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        """Preprocess a batch of images; per-call arguments override the
        instance defaults stored in ``__init__``.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        # Fixed precedence: the original read `do_resize and size is None or
        # resample is None`, which raised even when do_resize was False.
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 603 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
# Lazy-import structure: submodule name -> public names it provides.
# (The original rebound one placeholder name to the dict and then to each
# modeling list, so `_import_structure` at the bottom was a NameError.)
_import_structure = {
    "configuration_data2vec_audio": ["DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecAudioConfig"],
    "configuration_data2vec_text": [
        "DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Data2VecTextConfig",
        "Data2VecTextOnnxConfig",
    ],
    "configuration_data2vec_vision": [
        "DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Data2VecVisionConfig",
        "Data2VecVisionOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_data2vec_audio"] = [
        "DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecAudioForAudioFrameClassification",
        "Data2VecAudioForCTC",
        "Data2VecAudioForSequenceClassification",
        "Data2VecAudioForXVector",
        "Data2VecAudioModel",
        "Data2VecAudioPreTrainedModel",
    ]
    _import_structure["modeling_data2vec_text"] = [
        "DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecTextForCausalLM",
        "Data2VecTextForMaskedLM",
        "Data2VecTextForMultipleChoice",
        "Data2VecTextForQuestionAnswering",
        "Data2VecTextForSequenceClassification",
        "Data2VecTextForTokenClassification",
        "Data2VecTextModel",
        "Data2VecTextPreTrainedModel",
    ]
    _import_structure["modeling_data2vec_vision"] = [
        "DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecVisionForImageClassification",
        "Data2VecVisionForMaskedImageModeling",
        "Data2VecVisionForSemanticSegmentation",
        "Data2VecVisionModel",
        "Data2VecVisionPreTrainedModel",
    ]

if is_tf_available():
    _import_structure["modeling_tf_data2vec_vision"] = [
        "TFData2VecVisionForImageClassification",
        "TFData2VecVisionForSemanticSegmentation",
        "TFData2VecVisionModel",
        "TFData2VecVisionPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports (module paths kept as-is).
    from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig
    from .configuration_dataavec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        DataaVecTextConfig,
        DataaVecTextOnnxConfig,
    )
    from .configuration_dataavec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        DataaVecVisionConfig,
        DataaVecVisionOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_dataavec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            DataaVecAudioForAudioFrameClassification,
            DataaVecAudioForCTC,
            DataaVecAudioForSequenceClassification,
            DataaVecAudioForXVector,
            DataaVecAudioModel,
            DataaVecAudioPreTrainedModel,
        )
        from .modeling_dataavec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DataaVecTextForCausalLM,
            DataaVecTextForMaskedLM,
            DataaVecTextForMultipleChoice,
            DataaVecTextForQuestionAnswering,
            DataaVecTextForSequenceClassification,
            DataaVecTextForTokenClassification,
            DataaVecTextModel,
            DataaVecTextPreTrainedModel,
        )
        from .modeling_dataavec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            DataaVecVisionForImageClassification,
            DataaVecVisionForMaskedImageModeling,
            DataaVecVisionForSemanticSegmentation,
            DataaVecVisionModel,
            DataaVecVisionPreTrainedModel,
        )
    if is_tf_available():
        from .modeling_tf_dataavec_vision import (
            TFDataaVecVisionForImageClassification,
            TFDataaVecVisionForSemanticSegmentation,
            TFDataaVecVisionModel,
            TFDataaVecVisionPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so submodules load on first use.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 603 | 1 |
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
# The methods below read `logger` and `TARGET_FEATURE_LENGTH`; the original
# bound both values to the same mangled name (also shadowed by the class).
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

TARGET_FEATURE_LENGTH = 256


class __UpperCamelCase ( _lowerCAmelCase ):
    """Spectrogram-diffusion pipeline: encodes note tokens plus the previous
    mel chunk, denoises a new mel chunk with a FiLM decoder + DDPM
    scheduler, and optionally vocodes the result with MelGAN.
    """

    # NOTE(review): presumably `_optional_components` upstream; the original
    # bound the list to a mangled placeholder name.
    _optional_components = ["melgan"]

    def __init__(
        self,
        notes_encoder: SpectrogramNotesEncoder,
        continuous_encoder: SpectrogramContEncoder,
        decoder: TaFilmDecoder,
        scheduler: DDPMScheduler,
        melgan: OnnxRuntimeModel if is_onnx_available() else Any,
    ) -> None:
        super().__init__()

        # From MELGAN
        self.min_value = math.log(1e-5)  # Matches MelGAN training.
        self.max_value = 4.0  # Largest value for most examples
        self.n_dims = 128

        self.register_modules(
            notes_encoder=notes_encoder,
            continuous_encoder=continuous_encoder,
            decoder=decoder,
            scheduler=scheduler,
            melgan=melgan,
        )

    def scale_features(self, features, output_range=(-1.0, 1.0), clip=False):
        """Linearly map mel features from [min_value, max_value] into `output_range`."""
        min_out, max_out = output_range
        if clip:
            features = torch.clip(features, self.min_value, self.max_value)
        # Scale to [0, 1].
        zero_one = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out

    def scale_to_features(self, outputs, input_range=(-1.0, 1.0), clip=False):
        """Inverse of :meth:`scale_features`: map model outputs back to mel range."""
        min_out, max_out = input_range
        outputs = torch.clip(outputs, min_out, max_out) if clip else outputs
        # Scale to [0, 1].
        zero_one = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value

    def encode(self, input_tokens, continuous_inputs, continuous_mask):
        """Encode note tokens and the previous continuous (mel) chunk.

        Returns ``[(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]``.
        """
        tokens_mask = input_tokens > 0
        tokens_encoded, tokens_mask = self.notes_encoder(
            encoder_input_tokens=input_tokens, encoder_inputs_mask=tokens_mask
        )
        continuous_encoded, continuous_mask = self.continuous_encoder(
            encoder_inputs=continuous_inputs, encoder_inputs_mask=continuous_mask
        )
        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]

    def decode(self, encodings_and_masks, input_tokens, noise_time):
        """One decoder prediction for the noisy sample at `noise_time`."""
        timesteps = noise_time
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=input_tokens.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(input_tokens.device)
        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(input_tokens.shape[0], dtype=timesteps.dtype, device=timesteps.device)
        logits = self.decoder(
            encodings_and_masks=encodings_and_masks,
            decoder_input_tokens=input_tokens,
            decoder_noise_time=timesteps,
        )
        return logits

    @torch.no_grad()
    def __call__(
        self,
        input_tokens: List[List[int]],
        generator: Optional[torch.Generator] = None,
        num_inference_steps: int = 100,
        return_dict: bool = True,
        output_type: str = "numpy",
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ) -> Union[AudioPipelineOutput, Tuple]:
        """Generate audio chunk-by-chunk, conditioning each chunk on the
        previous prediction; returns mel features or MelGAN audio.
        """
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f'`callback_steps` has to be a positive integer but is {callback_steps} of type'
                f' {type(callback_steps)}.'
            )

        pred_mel = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims], dtype=np.float32)
        full_pred_mel = np.zeros([1, 0, self.n_dims], np.float32)
        ones = torch.ones((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)

        for i, encoder_input_tokens in enumerate(input_tokens):
            if i == 0:
                encoder_continuous_inputs = torch.from_numpy(pred_mel[:1].copy()).to(
                    device=self.device, dtype=self.decoder.dtype
                )
                # The first chunk has no previous context.
                encoder_continuous_mask = torch.zeros((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                encoder_continuous_mask = ones

            encoder_continuous_inputs = self.scale_features(
                encoder_continuous_inputs, output_range=[-1.0, 1.0], clip=True
            )

            encodings_and_masks = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens]).to(device=self.device),
                continuous_inputs=encoder_continuous_inputs,
                continuous_mask=encoder_continuous_mask,
            )

            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            x = randn_tensor(
                shape=encoder_continuous_inputs.shape,
                generator=generator,
                device=self.device,
                dtype=self.decoder.dtype,
            )

            # set step values
            self.scheduler.set_timesteps(num_inference_steps)

            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
                output = self.decode(
                    encodings_and_masks=encodings_and_masks,
                    input_tokens=x,
                    noise_time=t / self.scheduler.config.num_train_timesteps,
                )
                # Compute previous output: x_t -> x_t-1
                x = self.scheduler.step(output, t, x, generator=generator).prev_sample

            mel = self.scale_to_features(x, input_range=[-1.0, 1.0])
            encoder_continuous_inputs = mel[:1]
            pred_mel = mel.cpu().float().numpy()

            full_pred_mel = np.concatenate([full_pred_mel, pred_mel[:1]], axis=1)

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, full_pred_mel)

            logger.info("Generated segment", i)

        if output_type == "numpy" and not is_onnx_available():
            raise ValueError(
                "Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'."
            )
        elif output_type == "numpy" and self.melgan is None:
            raise ValueError(
                "Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'."
            )

        if output_type == "numpy":
            output = self.melgan(input_features=full_pred_mel.astype(np.float32))
        else:
            output = full_pred_mel

        if not return_dict:
            return (output,)

        return AudioPipelineOutput(audios=output)
| 80 |
'''simple docstring'''
import os
# Roman numeral symbol values (the parser below reads this by the name
# SYMBOLS; the original bound the dict to a mangled placeholder).
SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}


def parse_roman_numerals(numerals: str) -> int:
    """Convert a Roman-numeral string to its integer value.

    A symbol smaller than its right-hand neighbour is subtracted
    (e.g. ``IV`` -> 4); otherwise it is added. Returns 0 for an
    empty string.
    """
    if not numerals:
        return 0
    total_value = 0
    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    # The final symbol is always added.
    total_value += SYMBOLS[numerals[index]]
    return total_value
def generate_roman_numerals(num: int) -> str:
    """Return the minimal (canonical) Roman-numeral form of ``num``.

    Handles each decimal digit with its subtractive forms (CM, CD, XC,
    XL, IX, IV). Named to match the call in ``solution`` below.
    """
    numerals = ""

    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000

    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100

    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10

    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"

    return numerals
def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    """Project Euler 89: characters saved by rewriting each Roman numeral
    in the data file in minimal form.

    The data file is resolved relative to this script's directory (the
    original took ``dirname`` of the filename argument itself, and read an
    undefined ``roman_numerals_filename`` name).
    """
    savings = 0
    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()
    for line in lines:
        original = line.strip()
        value = parse_roman_numerals(original)
        shortest = generate_roman_numerals(value)
        savings += len(original) - len(shortest)
    return savings
if __name__ == "__main__":
    # Print the Project Euler 89 answer (characters saved) when run as a script.
    print(f"""{solution() = }""")
| 538 | 0 |
from itertools import count
def _lowercase(min_block_length: int = 50) -> int:
    """Project Euler 115: smallest row length ``n`` for which the number of
    ways to fill a row with blocks of at least ``min_block_length`` cells
    (blocks separated by at least one empty cell) first exceeds one million.

    ``fill_count_functions[n]`` accumulates the fill count F(min_block_length, n).
    """
    fill_count_functions = [1] * min_block_length
    for n in count(min_block_length):
        fill_count_functions.append(1)
        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]
            fill_count_functions[n] += 1
        if fill_count_functions[n] > 1000000:
            break
    return n


# Name used by the __main__ guard below (the original called an undefined
# `solution`).
solution = _lowercase


if __name__ == "__main__":
    print(F"""{solution() = }""")
| 400 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__snake_case = logging.get_logger(__name__)
class lowercase__ ( _UpperCAmelCase ):
A__ : Dict =["""input_features""", """is_longer"""]
def __init__( self : Any , UpperCAmelCase_ : int=64 , UpperCAmelCase_ : List[str]=48000 , UpperCAmelCase_ : List[Any]=480 , UpperCAmelCase_ : Dict=10 , UpperCAmelCase_ : int=1024 , UpperCAmelCase_ : List[str]=0.0 , UpperCAmelCase_ : List[Any]=False , UpperCAmelCase_ : float = 0 , UpperCAmelCase_ : float = 14000 , UpperCAmelCase_ : int = None , UpperCAmelCase_ : str = "fusion" , UpperCAmelCase_ : str = "repeatpad" , **UpperCAmelCase_ : List[Any] , ):
super().__init__(
feature_size=UpperCAmelCase_ , sampling_rate=UpperCAmelCase_ , padding_value=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , **UpperCAmelCase_ , )
SCREAMING_SNAKE_CASE__ = top_db
SCREAMING_SNAKE_CASE__ = truncation
SCREAMING_SNAKE_CASE__ = padding
SCREAMING_SNAKE_CASE__ = fft_window_size
SCREAMING_SNAKE_CASE__ = (fft_window_size >> 1) + 1
SCREAMING_SNAKE_CASE__ = hop_length
SCREAMING_SNAKE_CASE__ = max_length_s
SCREAMING_SNAKE_CASE__ = max_length_s * sampling_rate
SCREAMING_SNAKE_CASE__ = sampling_rate
SCREAMING_SNAKE_CASE__ = frequency_min
SCREAMING_SNAKE_CASE__ = frequency_max
SCREAMING_SNAKE_CASE__ = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCAmelCase_ , min_frequency=UpperCAmelCase_ , max_frequency=UpperCAmelCase_ , sampling_rate=UpperCAmelCase_ , norm=UpperCAmelCase_ , mel_scale='htk' , )
SCREAMING_SNAKE_CASE__ = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCAmelCase_ , min_frequency=UpperCAmelCase_ , max_frequency=UpperCAmelCase_ , sampling_rate=UpperCAmelCase_ , norm='slaney' , mel_scale='slaney' , )
def A_ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE__ = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def A_ ( self : Union[str, Any] , UpperCAmelCase_ : np.array , UpperCAmelCase_ : Optional[np.array] = None ):
SCREAMING_SNAKE_CASE__ = spectrogram(
UpperCAmelCase_ , window_function(self.fft_window_size , 'hann' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=UpperCAmelCase_ , log_mel='dB' , )
return log_mel_spectrogram.T
def A_ ( self : str , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[str] ):
SCREAMING_SNAKE_CASE__ = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
SCREAMING_SNAKE_CASE__ = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
SCREAMING_SNAKE_CASE__ = [0]
# randomly choose index for each part
SCREAMING_SNAKE_CASE__ = np.random.choice(ranges[0] )
SCREAMING_SNAKE_CASE__ = np.random.choice(ranges[1] )
SCREAMING_SNAKE_CASE__ = np.random.choice(ranges[2] )
SCREAMING_SNAKE_CASE__ = mel[idx_front : idx_front + chunk_frames, :]
SCREAMING_SNAKE_CASE__ = mel[idx_middle : idx_middle + chunk_frames, :]
SCREAMING_SNAKE_CASE__ = mel[idx_back : idx_back + chunk_frames, :]
SCREAMING_SNAKE_CASE__ = torch.tensor(mel[None, None, :] )
SCREAMING_SNAKE_CASE__ = torch.nn.functional.interpolate(
UpperCAmelCase_ , size=[chunk_frames, 64] , mode='bilinear' , align_corners=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = mel_shrink[0][0].numpy()
SCREAMING_SNAKE_CASE__ = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
def A_ ( self : Union[str, Any] , UpperCAmelCase_ : np.array , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : str ):
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
SCREAMING_SNAKE_CASE__ = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
SCREAMING_SNAKE_CASE__ = len(UpperCAmelCase_ ) - max_length
SCREAMING_SNAKE_CASE__ = np.random.randint(0 , overflow + 1 )
SCREAMING_SNAKE_CASE__ = waveform[idx : idx + max_length]
SCREAMING_SNAKE_CASE__ = self._np_extract_fbank_features(UpperCAmelCase_ , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
SCREAMING_SNAKE_CASE__ = self._np_extract_fbank_features(UpperCAmelCase_ , self.mel_filters )
SCREAMING_SNAKE_CASE__ = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
SCREAMING_SNAKE_CASE__ = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
SCREAMING_SNAKE_CASE__ = np.stack([mel, mel, mel, mel] , axis=0 )
SCREAMING_SNAKE_CASE__ = False
else:
SCREAMING_SNAKE_CASE__ = self._random_mel_fusion(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = True
else:
raise NotImplementedError(F'data_truncating {truncation} not implemented' )
else:
SCREAMING_SNAKE_CASE__ = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
SCREAMING_SNAKE_CASE__ = int(max_length / len(UpperCAmelCase_ ) )
SCREAMING_SNAKE_CASE__ = np.stack(np.tile(UpperCAmelCase_ , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
SCREAMING_SNAKE_CASE__ = int(max_length / len(UpperCAmelCase_ ) )
SCREAMING_SNAKE_CASE__ = np.stack(np.tile(UpperCAmelCase_ , UpperCAmelCase_ ) )
SCREAMING_SNAKE_CASE__ = np.pad(UpperCAmelCase_ , (0, max_length - waveform.shape[0]) , mode='constant' , constant_values=0 )
if truncation == "fusion":
SCREAMING_SNAKE_CASE__ = self._np_extract_fbank_features(UpperCAmelCase_ , self.mel_filters )
SCREAMING_SNAKE_CASE__ = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
SCREAMING_SNAKE_CASE__ = self._np_extract_fbank_features(UpperCAmelCase_ , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : List[str] , UpperCAmelCase_ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , UpperCAmelCase_ : str = None , UpperCAmelCase_ : Optional[str] = None , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : Optional[Union[str, TensorType]] = None , **UpperCAmelCase_ : Optional[int] , ):
SCREAMING_SNAKE_CASE__ = truncation if truncation is not None else self.truncation
SCREAMING_SNAKE_CASE__ = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'
F' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'
F' was sampled with {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
SCREAMING_SNAKE_CASE__ = isinstance(UpperCAmelCase_ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'Only mono-channel audio is supported for input to {self}' )
SCREAMING_SNAKE_CASE__ = is_batched_numpy or (
isinstance(UpperCAmelCase_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
SCREAMING_SNAKE_CASE__ = [np.asarray(UpperCAmelCase_ , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(UpperCAmelCase_ , np.ndarray ):
SCREAMING_SNAKE_CASE__ = np.asarray(UpperCAmelCase_ , dtype=np.floataa )
elif isinstance(UpperCAmelCase_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
SCREAMING_SNAKE_CASE__ = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
SCREAMING_SNAKE_CASE__ = [np.asarray(UpperCAmelCase_ )]
# convert to mel spectrogram, truncate and pad if needed.
SCREAMING_SNAKE_CASE__ = [
self._get_input_mel(UpperCAmelCase_ , max_length if max_length else self.nb_max_samples , UpperCAmelCase_ , UpperCAmelCase_ )
for waveform in raw_speech
]
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = []
for mel, longer in padded_inputs:
input_mel.append(UpperCAmelCase_ )
is_longer.append(UpperCAmelCase_ )
if truncation == "fusion" and sum(UpperCAmelCase_ ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
SCREAMING_SNAKE_CASE__ = np.random.randint(0 , len(UpperCAmelCase_ ) )
SCREAMING_SNAKE_CASE__ = True
if isinstance(input_mel[0] , UpperCAmelCase_ ):
SCREAMING_SNAKE_CASE__ = [np.asarray(UpperCAmelCase_ , dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
SCREAMING_SNAKE_CASE__ = [[longer] for longer in is_longer]
SCREAMING_SNAKE_CASE__ = {'input_features': input_mel, 'is_longer': is_longer}
SCREAMING_SNAKE_CASE__ = BatchFeature(UpperCAmelCase_ )
if return_tensors is not None:
SCREAMING_SNAKE_CASE__ = input_features.convert_to_tensors(UpperCAmelCase_ )
return input_features
| 400 | 1 |
"""simple docstring"""
import random
def partition(a: list, left_index: int, right_index: int) -> int:
    """Lomuto-style partition of a[left_index:right_index] around a[left_index].

    Returns the final index of the pivot; elements smaller than the pivot end
    up to its left, the rest to its right. Mutates `a` in place.
    """
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[i], a[j] = a[j], a[i]
            i += 1
    # Move the pivot between the two regions.
    a[i - 1], a[left_index] = a[left_index], a[i - 1]
    return i - 1
def quick_sort_random(a: list, left: int, right: int) -> None:
    """Sort a[left:right] in place with quicksort using a random pivot.

    The random pivot makes the worst case unlikely on adversarial inputs.
    """
    if left < right:
        pivot = random.randint(left, right - 1)
        a[pivot], a[left] = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(
            a, left, pivot_index)  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a, pivot_index + 1, right)  # recursive quicksort to the right of the pivot point
def main() -> None:
    """Read comma-separated integers from stdin, sort them, and print the result."""
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    arr = [int(item) for item in user_input.split(""",""")]
    quick_sort_random(arr, 0, len(arr))
    print(arr)
# Script entry point: only run the interactive driver when executed directly.
if __name__ == "__main__":
    main()
| 88 |
"""simple docstring"""
def abbr(a: str, b: str) -> bool:
    """Return True if `a` can be abbreviated to `b`.

    An abbreviation capitalizes zero or more lowercase letters of `a` and
    deletes the remaining lowercase letters (HackerRank "Abbreviation").
    Solved with a boolean DP table where dp[i][j] means a[:i] can produce b[:j].

    >>> abbr("daBcd", "ABC")
    True
    >>> abbr("dBcd", "ABC")
    False
    """
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                # Capitalize a[i] to match b[j].
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                # Delete a[i] (only lowercase letters may be deleted).
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
    # Run the embedded doctests when executed as a script.
    import doctest
    doctest.testmod()
| 88 | 1 |
"""Lazy import structure for the I-BERT model (configuration + PyTorch modeling)."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Base import structure; torch-only modules are appended below when available.
_import_structure = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_ibert"] = [
        "IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "IBertForMaskedLM",
        "IBertForMultipleChoice",
        "IBertForQuestionAnswering",
        "IBertForSequenceClassification",
        "IBertForTokenClassification",
        "IBertModel",
        "IBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Direct imports for static type checkers only.
    from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ibert import (
            IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            IBertForMaskedLM,
            IBertForMultipleChoice,
            IBertForQuestionAnswering,
            IBertForSequenceClassification,
            IBertForTokenClassification,
            IBertModel,
            IBertPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 720 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class KarrasVePipelineFastTests(unittest.TestCase):
    """Fast CPU checks for `KarrasVePipeline` using a tiny random UNet."""

    @property
    def dummy_uncond_unet(self):
        # Tiny deterministic UNet so the pipeline runs in seconds.
        torch.manual_seed(0)
        model = UNetaDModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=('DownBlock2D', 'AttnDownBlock2D'), up_block_types=('AttnUpBlock2D', 'UpBlock2D'), )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type='numpy').images

        generator = torch.manual_seed(0)
        # Same seed with return_dict=False must give the identical image.
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type='numpy', return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
    """Slow end-to-end check against the pretrained google/ncsnpp-celebahq-256 UNet."""

    def test_full_inference(self):
        model_id = 'google/ncsnpp-celebahq-256'
        model = UNetaDModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type='numpy').images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
| 695 | 0 |
"""simple docstring"""
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger (distinct name so the archive map below does not clobber it).
logger = logging.get_logger(__name__)

EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "snap-research/efficientformer-l1-300": (
        "https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"
    ),
}
class EfficientFormerConfig(PretrainedConfig):
    """Configuration for an EfficientFormer model (snap-research/efficientformer).

    Defaults reproduce the efficientformer-l1 architecture. The list defaults
    are treated as read-only; they are never mutated.
    """

    model_type = '''efficientformer'''

    def __init__(
        self,
        depths: List[int] = [3, 2, 6, 4],
        hidden_sizes: List[int] = [4_8, 9_6, 2_2_4, 4_4_8],
        downsamples: List[bool] = [True, True, True, True],
        dim: int = 4_4_8,
        key_dim: int = 3_2,
        attention_ratio: int = 4,
        resolution: int = 7,
        num_hidden_layers: int = 5,
        num_attention_heads: int = 8,
        mlp_expansion_ratio: int = 4,
        hidden_dropout_prob: float = 0.0,
        patch_size: int = 1_6,
        num_channels: int = 3,
        pool_size: int = 3,
        downsample_patch_size: int = 3,
        downsample_stride: int = 2,
        downsample_pad: int = 1,
        drop_path_rate: float = 0.0,
        num_meta3d_blocks: int = 1,
        distillation: bool = True,
        use_layer_scale: bool = True,
        layer_scale_init_value: float = 1E-5,
        hidden_act: str = "gelu",
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1E-12,
        image_size: int = 2_2_4,
        batch_norm_eps: float = 1E-05,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
| 617 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str):
    """Merge two equal-length bit strings that differ in at most one position.

    Returns the merged pattern with "_" at the differing bit, or False when
    the strings differ in more than one position.
    """
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = """_"""
    if count > 1:
        return False
    else:
        return "".join(list1)
def check(binary: list[str]) -> list[str]:
    """Iteratively combine implicants and return the prime implicants.

    Repeatedly pairs up terms via `compare_string`; terms that stay unmarked
    ("$") in a round are prime and collected into `pi`.
    """
    pi = []
    while True:
        check1 = ["""$"""] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    check1[i] = """*"""
                    check1[j] = """*"""
                    temp.append("""X""")
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))
def decimal_to_binary(no_of_variable: int, minterms):
    """Render each minterm as a `no_of_variable`-digit binary-style string.

    Digits are produced via repeated `% 2` / `//= 2`, most significant first.
    """
    temp = []
    for minterm in minterms:
        string = """"""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp
def is_for_table(string1: str, string2: str, count: int) -> bool:
    """Return True when the two strings differ in exactly `count` positions."""
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count
def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    """Select the essential prime implicants from the coverage chart.

    First picks implicants that uniquely cover some minterm column, then
    greedily picks the implicant covering the most remaining columns until
    every column is covered. Mutates `chart` in place.
    """
    temp = []
    select = [0] * len(chart)
    # Mark implicants that are the sole cover of some column.
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    # Take the essential implicants and clear the columns they cover.
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # Greedy cover for whatever columns remain.
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0
def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    """Build the coverage chart: chart[i][j] == 1 iff implicant i covers minterm j.

    An implicant with `c` don't-care bits ("_") covers a minterm when they
    differ in exactly `c` positions.
    """
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("""_""")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart
def main() -> None:
    """Interactive Quine-McCluskey driver: read variables and minterms from
    stdin, then print the prime and essential prime implicants."""
    no_of_variable = int(input("""Enter the no. of variables\n"""))
    minterms = [
        float(x)
        for x in input(
            """Enter the decimal representation of Minterms 'Spaces Separated'\n""").split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("""Prime Implicants are:""")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("""Essential Prime Implicants are:""")
    print(essential_prime_implicants)
if __name__ == "__main__":
    # Run doctests first, then the interactive driver.
    import doctest
    doctest.testmod()
    main()
| 617 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger (distinct name so the archive map below does not clobber it).
logger = logging.get_logger(__name__)

OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {'openai-gpt': 'https://huggingface.co/openai-gpt/resolve/main/config.json'}
class OpenAIGPTConfig(PretrainedConfig):
    """Configuration for the original OpenAI GPT model.

    Defaults reproduce the `openai-gpt` checkpoint architecture.
    """

    model_type = '''openai-gpt'''
    # Map common config names onto the GPT-specific attribute names.
    attribute_map = {
        '''max_position_embeddings''': '''n_positions''',
        '''hidden_size''': '''n_embd''',
        '''num_attention_heads''': '''n_head''',
        '''num_hidden_layers''': '''n_layer''',
    }

    def __init__(
        self,
        vocab_size=4_0_4_7_8,
        n_positions=5_1_2,
        n_embd=7_6_8,
        n_layer=1_2,
        n_head=1_2,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1E-5,
        initializer_range=0.0_2,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        """See `PretrainedConfig` for the `summary_*` head options and `**kwargs`."""
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
| 720 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    """Build (old_name, new_name) pairs mapping an original BEiT/DiT checkpoint
    onto the Hugging Face BEiT naming scheme.

    Args:
        config: a BEiT config providing `num_hidden_layers`.
        has_lm_head: keep the masked-image-modeling head (mask token + final norm)
            instead of the classification head.
        is_semantic: semantic-segmentation checkpoints prefix keys with "backbone.".
    """
    prefix = 'backbone.' if is_semantic else ''

    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"""{prefix}blocks.{i}.norm1.weight""", f"""beit.encoder.layer.{i}.layernorm_before.weight"""))
        rename_keys.append((f"""{prefix}blocks.{i}.norm1.bias""", f"""beit.encoder.layer.{i}.layernorm_before.bias"""))
        rename_keys.append(
            (f"""{prefix}blocks.{i}.attn.proj.weight""", f"""beit.encoder.layer.{i}.attention.output.dense.weight"""))
        rename_keys.append(
            (f"""{prefix}blocks.{i}.attn.proj.bias""", f"""beit.encoder.layer.{i}.attention.output.dense.bias"""))
        rename_keys.append((f"""{prefix}blocks.{i}.norm2.weight""", f"""beit.encoder.layer.{i}.layernorm_after.weight"""))
        rename_keys.append((f"""{prefix}blocks.{i}.norm2.bias""", f"""beit.encoder.layer.{i}.layernorm_after.bias"""))
        rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc1.weight""", f"""beit.encoder.layer.{i}.intermediate.dense.weight"""))
        rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc1.bias""", f"""beit.encoder.layer.{i}.intermediate.dense.bias"""))
        rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc2.weight""", f"""beit.encoder.layer.{i}.output.dense.weight"""))
        rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc2.bias""", f"""beit.encoder.layer.{i}.output.dense.bias"""))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            (f"""{prefix}cls_token""", '''beit.embeddings.cls_token'''),
            (f"""{prefix}patch_embed.proj.weight""", '''beit.embeddings.patch_embeddings.projection.weight'''),
            (f"""{prefix}patch_embed.proj.bias""", '''beit.embeddings.patch_embeddings.projection.bias'''),
            (f"""{prefix}pos_embed""", '''beit.embeddings.position_embeddings'''),
        ])

    if has_lm_head:
        # mask token + layernorm
        rename_keys.extend(
            [
                ('''mask_token''', '''beit.embeddings.mask_token'''),
                ('''norm.weight''', '''layernorm.weight'''),
                ('''norm.bias''', '''layernorm.bias'''),
            ])
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ('''fc_norm.weight''', '''beit.pooler.layernorm.weight'''),
                ('''fc_norm.bias''', '''beit.pooler.layernorm.bias'''),
                ('''head.weight''', '''classifier.weight'''),
                ('''head.bias''', '''classifier.bias'''),
            ])

    return rename_keys
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    """Split each layer's fused qkv projection into separate query/key/value
    weights and rename gamma_1/gamma_2 to lambda_1/lambda_2, in place.

    Note: BEiT uses biases only for query and value (no key bias), matching the
    original checkpoint layout.
    """
    for i in range(config.num_hidden_layers):
        prefix = 'backbone.' if is_semantic else ''
        # queries, keys and values
        in_proj_weight = state_dict.pop(f"""{prefix}blocks.{i}.attn.qkv.weight""")
        q_bias = state_dict.pop(f"""{prefix}blocks.{i}.attn.q_bias""")
        v_bias = state_dict.pop(f"""{prefix}blocks.{i}.attn.v_bias""")

        state_dict[f"""beit.encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"""beit.encoder.layer.{i}.attention.attention.query.bias"""] = q_bias
        state_dict[f"""beit.encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"""beit.encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"""beit.encoder.layer.{i}.attention.attention.value.bias"""] = v_bias

        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f"""{prefix}blocks.{i}.gamma_1""")
        gamma_2 = state_dict.pop(f"""{prefix}blocks.{i}.gamma_2""")

        state_dict[f"""beit.encoder.layer.{i}.lambda_1"""] = gamma_1
        state_dict[f"""beit.encoder.layer.{i}.lambda_2"""] = gamma_2
def rename_key(dct, old, new):
    """Move `dct[old]` to `dct[new]`, removing the old key (in place)."""
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    """Download the standard COCO cats test image used to sanity-check conversions."""
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    """Convert an original DiT checkpoint into the Hugging Face BEiT format.

    Downloads the checkpoint from `checkpoint_url`, remaps its weights, verifies
    the output logits shape on a test image, saves model + image processor to
    `pytorch_dump_folder_path`, and optionally pushes both to the Hub.
    """
    # "rvlcdip" checkpoints are classification fine-tunes; the rest keep the LM head.
    has_lm_head = False if '''rvlcdip''' in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)

    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = '''huggingface/label-files'''
        filename = '''rvlcdip-id2label.json'''
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='''dataset'''), '''r'''))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='''cpu''')['''model''']

    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)

    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False)
    image = prepare_img()

    encoding = image_processor(images=image, return_tensors='''pt''')
    pixel_values = encoding['''pixel_values''']

    outputs = model(pixel_values)
    logits = outputs.logits

    # verify logits
    expected_shape = [1, 16] if '''rvlcdip''' in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"""Saving model to {pytorch_dump_folder_path}""")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"""Saving image processor to {pytorch_dump_folder_path}""")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        if has_lm_head:
            model_name = '''dit-base''' if '''base''' in checkpoint_url else '''dit-large'''
        else:
            model_name = '''dit-base-finetuned-rvlcdip''' if '''dit-b''' in checkpoint_url else '''dit-large-finetuned-rvlcdip'''
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization='''nielsr''', commit_message='''Add image processor''', use_temp_dir=True, )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization='''nielsr''', commit_message='''Add model''', use_temp_dir=True, )
if __name__ == "__main__":
    # CLI entry point for the conversion script.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '''--checkpoint_url''',
        default='''https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth''',
        type=str,
        help='''URL to the original PyTorch checkpoint (.pth file).''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
    )
    parser.add_argument(
        '''--push_to_hub''',
        action='''store_true''',
    )
    args = parser.parse_args()
    convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 404 | 0 |
"""simple docstring"""
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency(inductance: float, capacitance: float) -> tuple:
    """Compute the resonant frequency of an LC circuit: f = 1 / (2*pi*sqrt(L*C)).

    Returns a ("Resonant frequency", value) tuple; raises ValueError for
    non-positive inductance or capacitance.
    """
    if inductance <= 0:
        raise ValueError('''Inductance cannot be 0 or negative''')
    elif capacitance <= 0:
        raise ValueError('''Capacitance cannot be 0 or negative''')
    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance)))),
        )
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 512 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class TestActivations(unittest.TestCase):
    """Unit tests for `transformers.activations.get_activation` and the GELU variants."""

    def test_gelu_versions(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation('''gelu''')
        # The python port must match torch's builtin GELU but differ from gelu_new.
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation('''gelu''')
        gelu10 = get_activation('''gelu_10''')

        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)

        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)

        # gelu_10 must clip at 10 and agree with gelu below the clip.
        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))

    def test_get_activation(self):
        # All registered names resolve; unknown names raise KeyError.
        get_activation('''gelu''')
        get_activation('''gelu_10''')
        get_activation('''gelu_fast''')
        get_activation('''gelu_new''')
        get_activation('''gelu_python''')
        get_activation('''gelu_pytorch_tanh''')
        get_activation('''linear''')
        get_activation('''mish''')
        get_activation('''quick_gelu''')
        get_activation('''relu''')
        get_activation('''sigmoid''')
        get_activation('''silu''')
        get_activation('''swish''')
        get_activation('''tanh''')
        with self.assertRaises(KeyError):
            get_activation('''bogus''')
        with self.assertRaises(KeyError):
            get_activation(None)

    def test_activations_are_distinct_objects(self):
        # Each lookup returns a fresh object; attributes must not leak between them.
        act1 = get_activation('''gelu''')
        act1.a = 1
        act2 = get_activation('''gelu''')
        self.assertEqual(act1.a, 1)
        with self.assertRaises(AttributeError):
            _ = act2.a
| 512 | 1 |
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    """POST `message_body` as JSON to a Slack incoming-webhook URL.

    Raises ValueError when Slack responds with a non-200 status code.
    """
    headers = {'''Content-Type''': '''application/json'''}
    response = requests.post(slack_url, json={'''text''': message_body}, headers=headers)
    if response.status_code != 2_0_0:
        msg = (
            '''Request to slack returned an error '''
            F'''{response.status_code}, the response is:\n{response.text}'''
        )
        raise ValueError(msg)
if __name__ == "__main__":
    # Set the slack url to the one provided by Slack when you create the webhook at
    # https://my.slack.com/services/new/incoming-webhook/
    # NOTE: replace both placeholders before running; this performs a real HTTP POST.
    send_slack_message('''<YOUR MESSAGE BODY>''', '''<SLACK CHANNEL URL>''')
| 501 |
import sys
import turtle
def get_mid(p1: tuple, p2: tuple) -> tuple:
    """Return the midpoint of the segment between points `p1` and `p2`."""
    return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2
def triangle(vertex1: tuple, vertex2: tuple, vertex3: tuple, depth: int) -> None:
    """Recursively draw a Sierpinski triangle of the given depth.

    Uses the module-level `my_pen` turtle created in the __main__ block.
    """
    # Draw the outline of the current triangle.
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])

    if depth == 0:
        return

    # Recurse on the three corner sub-triangles.
    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)
if __name__ == "__main__":
    # Usage: python fractals.py <depth>; draws the fractal with a turtle pen.
    if len(sys.argv) != 2:
        raise ValueError(
            '''Correct format for using this script: '''
            '''python fractals.py <int:depth_for_fractal>'''
        )
    my_pen = turtle.Turtle()
    my_pen.ht()
    my_pen.speed(5)
    my_pen.pencolor('''red''')

    vertices = [(-1_75, -1_25), (0, 1_75), (1_75, -1_25)]  # vertices of triangle
    triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 501 | 1 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger (distinct name so the archive map below does not clobber it).
logger = logging.get_logger(__name__)

BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Salesforce/blip-vqa-base": "https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json",
    "Salesforce/blip-vqa-capfit-large": (
        "https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json"
    ),
    "Salesforce/blip-image-captioning-base": (
        "https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json"
    ),
    "Salesforce/blip-image-captioning-large": (
        "https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json"
    ),
    "Salesforce/blip-itm-base-coco": "https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json",
    "Salesforce/blip-itm-large-coco": "https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json",
    "Salesforce/blip-itm-base-flikr": "https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json",
    "Salesforce/blip-itm-large-flikr": (
        "https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json"
    ),
}
class BlipTextConfig(PretrainedConfig):
    """Configuration for the BLIP text model (decoder-style BERT variant)."""

    model_type = 'blip_text_model'

    def __init__(
        self,
        vocab_size=3_0524,
        hidden_size=768,
        encoder_hidden_size=768,
        intermediate_size=3072,
        projection_dim=768,
        num_hidden_layers=12,
        num_attention_heads=8,
        max_position_embeddings=512,
        hidden_act="gelu",
        layer_norm_eps=1e-12,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        bos_token_id=3_0522,
        eos_token_id=2,
        pad_token_id=0,
        sep_token_id=102,
        is_decoder=True,
        use_cache=True,
        **kwargs,
    ) -> Any:
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, sep_token_id=sep_token_id, **kwargs, )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_hidden_size = encoder_hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.hidden_dropout_prob = hidden_dropout_prob
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.is_decoder = is_decoder
        self.use_cache = use_cache

    @classmethod
    def _UpperCAmelCase(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        """Load this sub-config, unwrapping the "text_config" key of a BlipConfig."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from BlipConfig
        if config_dict.get("""model_type""") == "blip":
            config_dict = config_dict["""text_config"""]

        if "model_type" in config_dict and hasattr(cls, """model_type""") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.')

        return cls.from_dict(config_dict, **kwargs)
class BlipVisionConfig(PretrainedConfig):
    """Configuration for the BLIP vision model (ViT-style encoder)."""

    model_type = 'blip_vision_model'

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        image_size=384,
        patch_size=16,
        hidden_act="gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=1e-10,
        **kwargs,
    ) -> Dict:
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def _UpperCAmelCase(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        """Load this sub-config, unwrapping the "vision_config" key of a BlipConfig."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from BlipConfig
        if config_dict.get("""model_type""") == "blip":
            config_dict = config_dict["""vision_config"""]

        if "model_type" in config_dict and hasattr(cls, """model_type""") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.')

        return cls.from_dict(config_dict, **kwargs)
class BlipConfig(PretrainedConfig):
    """Composite configuration joining a BlipTextConfig and a BlipVisionConfig."""

    model_type = 'blip'
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=512,
        logit_scale_init_value=2.65_92,
        image_text_hidden_size=256,
        **kwargs,
    ) -> Optional[int]:
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("""`text_config` is `None`. Initializing the `BlipTextConfig` with default values.""")

        if vision_config is None:
            vision_config = {}
            logger.info("""`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.""")

        self.text_config = BlipTextConfig(**text_config)
        self.vision_config = BlipVisionConfig(**vision_config)

        # Keep the text model's cross-attention width in sync with the vision tower.
        self.text_config.encoder_hidden_size = self.vision_config.hidden_size

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
        self.image_text_hidden_size = image_text_hidden_size

    @classmethod
    def _UpperCAmelCase(cls, text_config, vision_config, **kwargs) -> Optional[Any]:
        """Build a BlipConfig from already-instantiated sub-configs."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def _UpperCAmelCase(self) -> str:
        """Serialize to a dict, expanding both sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["""text_config"""] = self.text_config.to_dict()
        output["""vision_config"""] = self.vision_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
| 641 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    """Download and return the demo "merlion" image (RGB) used to sanity-check
    the converted model.

    Fixes vs. original: the function was defined under an obfuscated name while
    being invoked as ``load_demo_image``, and ``stream=`` was passed an
    undefined name (restored to ``True`` so PIL can read from the raw stream).
    """
    url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
    # stream=True exposes the undecoded response body as a file-like `.raw`.
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image
def create_rename_keys(config):
    """Build the (old_key, new_key) pairs mapping LAVIS checkpoint parameter
    names onto the HF BLIP-2 naming scheme.

    Covers the ViT vision encoder (class token, position/patch embeddings,
    post-layernorm, and per-layer norms/attention/MLP weights) plus the
    Q-Former embedding layernorm.

    Args:
        config: object exposing ``config.vision_config.num_hidden_layers``.

    Returns:
        list[tuple[str, str]]: rename pairs, one per parameter.
    """
    rename_keys = []
    # fmt: off
    # vision encoder
    rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding"))
    rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding"))
    rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight"))
    rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias"))
    rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight"))
    rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias"))
    for i in range(config.vision_config.num_hidden_layers):
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.weight", f"vision_model.encoder.layers.{i}.layer_norm1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.bias", f"vision_model.encoder.layers.{i}.layer_norm1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.weight", f"vision_model.encoder.layers.{i}.layer_norm2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.bias", f"vision_model.encoder.layers.{i}.layer_norm2.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.qkv.weight", f"vision_model.encoder.layers.{i}.self_attn.qkv.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.weight", f"vision_model.encoder.layers.{i}.self_attn.projection.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.bias", f"vision_model.encoder.layers.{i}.self_attn.projection.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.weight", f"vision_model.encoder.layers.{i}.mlp.fc1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.bias", f"vision_model.encoder.layers.{i}.mlp.fc1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.weight", f"vision_model.encoder.layers.{i}.mlp.fc2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.bias", f"vision_model.encoder.layers.{i}.mlp.fc2.bias"))
    # QFormer
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.layernorm.weight"))
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.layernorm.bias"))
    # fmt: on
    return rename_keys
def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]`` in place.

    Fixes vs. original: the three parameters shared one obfuscated name (a
    SyntaxError) and the popped value was discarded instead of being written
    back under the new key.
    """
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    """Fuse the per-layer q/v biases of the LAVIS attention blocks into the
    single qkv bias expected by the HF implementation.

    The key bias does not exist in the original checkpoint, so the fused
    tensor is laid out as [q_bias | zeros | v_bias]. Mutates ``state_dict``
    in place: pops the q/v entries and inserts ``...attn.qkv.bias``.
    """
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")
        # next, set bias in the state dict: k-bias is implicitly zero
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias
def get_blipa_config(model_name, eos_token_id=None):
    """Build the HF BLIP-2 config matching ``model_name``.

    Args:
        model_name: checkpoint identifier (e.g. "blip2-opt-2.7b-coco").
        eos_token_id: forwarded to the OPT text config (generation needs it).

    Returns:
        (config, image_size): COCO-finetuned checkpoints use 364px inputs,
        all others 224px.

    Raises:
        ValueError: if ``model_name`` matches no known text backbone
        (previously this fell through to an UnboundLocalError).
    """
    image_size = 364 if "coco" in model_name else 224
    vision_config = BlipaVisionConfig(image_size=image_size).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-2.7b", eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-6.7b", eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    else:
        raise ValueError(f"Model name {model_name} not supported")
    config = BlipaConfig(vision_config=vision_config, text_config=text_config)
    return config, image_size
@torch.no_grad()
def convert_blipa_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Convert a LAVIS BLIP-2 checkpoint to the HF format and verify it.

    Loads the original LAVIS model, renames/reshapes its state dict into the
    HF layout, checks that pixel values and logits match the original model,
    generates a demo caption with both models, and optionally saves/pushes
    the converted model.

    Args:
        model_name: one of the supported "blip2-..." checkpoint names.
        pytorch_dump_folder_path: where to save the converted model (optional).
        push_to_hub: also push model + processor to the Hub when True.

    Fixes vs. original: results were bound to a throwaway name while later
    lines read the real identifiers; the xl-coco branch computed an expected
    logits slice but never asserted against it.
    """
    # OPT checkpoints share the OPT tokenizer; flan-T5 checkpoints share t5-xl's.
    tokenizer = (
        AutoTokenizer.from_pretrained("facebook/opt-2.7b")
        if "opt" in model_name
        else AutoTokenizer.from_pretrained("google/flan-t5-xl")
    )
    eos_token_id = tokenizer("\n", add_special_tokens=False).input_ids[0]
    config, image_size = get_blipa_config(model_name, eos_token_id=eos_token_id)
    hf_model = BlipaForConditionalGeneration(config).eval()
    model_name_to_original = {
        "blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"),
        "blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"),
        "blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"),
        "blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"),
        "blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"),
        "blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"),
        "blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"),
    }
    name, model_type = model_name_to_original[model_name]
    # load original model
    print("Loading original model...")
    device = "cuda" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=model_type, is_eval=True, device=device
    )
    original_model.eval()
    print("Done!")
    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "opt_proj" in key:
            key = key.replace("opt_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("opt"):
            key = key.replace("opt", "language")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val
    # read in qv biases
    read_in_q_v_bias(state_dict, config)
    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
    assert len(missing_keys) == 0
    # position_ids is a buffer the HF model recreates itself.
    assert unexpected_keys == ["qformer.embeddings.position_ids"]
    image = load_demo_image()
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(device)
    input_ids = tokenizer(["\n"], return_tensors="pt").input_ids.to(device)
    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = BlipaProcessor(image_processor=image_processor, tokenizer=tokenizer)
    pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(device)
    # make sure processor creates exact same pixel values
    assert torch.allclose(original_pixel_values, pixel_values)
    original_model.to(device)
    hf_model.to(device)
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [""]}).logits
            logits = hf_model(pixel_values, input_ids).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]}
            ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(pixel_values, input_ids, labels=labels).logits
    assert original_logits.shape == logits.shape
    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])
    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]], device=device
        )
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]], device=device
        )
        # Fix: the original computed this slice but never compared against it.
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype), logits, atol=1e-2)
    print("Looks ok!")
    print("Generating a caption...")
    prompt = ""
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)
    original_outputs = original_model.generate({"image": original_pixel_values})
    outputs = hf_model.generate(
        pixel_values,
        input_ids,
        do_sample=False,
        num_beams=5,
        max_length=30,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.0,
        length_penalty=1.0,
        temperature=1,
    )
    print("Original generation:", original_outputs)
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:], skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)
    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        processor.push_to_hub(f"nielsr/{model_name}")
        hf_model.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    # CLI entry point: parse the checkpoint name and output options, then run
    # the conversion. Fixes vs. original: the parser/choices/args objects were
    # bound to throwaway names while the following lines read `parser`,
    # `choices` and `args`.
    parser = argparse.ArgumentParser()
    choices = [
        "blip2-opt-2.7b",
        "blip2-opt-6.7b",
        "blip2-opt-2.7b-coco",
        "blip2-opt-6.7b-coco",
        "blip2-flan-t5-xl",
        "blip2-flan-t5-xl-coco",
        "blip2-flan-t5-xxl",
    ]
    parser.add_argument(
        "--model_name",
        default="blip2-opt-2.7b",
        choices=choices,
        type=str,
        help="Path to hf config.json of model to convert",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )
    args = parser.parse_args()
    convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 641 | 1 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class _a(SCREAMING_SNAKE_CASE):
    """Task template describing a text-summarization dataset: maps arbitrary
    column names onto the canonical ("text", "summary") schema.

    Fixes vs. original: ``frozen=`` was passed a class object instead of a
    bool, and all fields shared the duplicate name ``A`` while the property
    read ``text_column``/``summary_column``.
    """

    # `task` is serialized even when left at its default value.
    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def UpperCamelCase_(self):
        """Mapping from the dataset's column names to the canonical schema."""
        return {self.text_column: "text", self.summary_column: "summary"}
| 508 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
# Module-level logger for this fine-tuning script.
UpperCamelCase_ = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we fine-tune from.

    Fixes vs. original: the class was defined under an obfuscated name but
    consumed as ``ModelArguments`` by ``HfArgumentParser`` below; all fields
    shared the duplicate name ``A`` and defaults referenced an undefined name
    (restored to ``None``/``False``).
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether to freeze the encoder."})
    freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."})
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to the data we fine-tune/evaluate on.

    Fixes vs. original: class renamed to the name ``HfArgumentParser`` uses
    below; duplicate field names restored; defaults that referenced an
    undefined name restored to ``None``/``True``.
    """

    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    task: Optional[str] = field(
        default="summarization",
        metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"},
    )
    max_source_length: Optional[int] = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    max_target_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    val_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        },
    )
    test_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."})
    n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."})
    n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."})
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."})
    tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."})
    eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."})
    ignore_pad_token_for_loss: bool = field(
        default=True,
        metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."},
    )
def handle_metrics(split, metrics, output_dir):
    """Log the metrics of ``split`` and persist them as JSON.

    Args:
        split: phase name ("train"/"val"/"test"); also prefixes the file name.
        metrics: mapping of metric name -> value.
        output_dir: directory the ``<split>_results.json`` file is written to.

    Fixes vs. original: defined under an obfuscated name while being invoked
    as ``handle_metrics`` by ``main``; parameter names restored from reads.
    """
    logger.info(f"***** {split} metrics *****")
    for key in sorted(metrics.keys()):
        logger.info(f"  {key} = {metrics[key]}")
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))
def lowercase__( ):
    """Fine-tune/evaluate/predict with a seq2seq model.

    Parses (ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments),
    loads config/tokenizer/model, builds the train/val/test datasets, runs the
    requested phases, and returns the collected metrics dict.

    NOTE(review): local identifiers look mangled — results are bound to
    SCREAMING_SNAKE_CASE while later statements read the original names
    (`parser`, `model_args`, `config`, `tokenizer`, `model`, `trainer`, ...),
    and several calls pass the undefined name `__UpperCamelCase`. As written
    these reads raise NameError; restore consistent names before running.
    """
    SCREAMING_SNAKE_CASE : int = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = parser.parse_args_into_dataclasses()
    check_output_dir(__UpperCamelCase )
    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' ,datefmt='%m/%d/%Y %H:%M:%S' ,level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN ,)
    logger.warning(
        'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' ,training_args.local_rank ,training_args.device ,training_args.n_gpu ,bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) ,training_args.fpaa ,)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank ):
        transformers.utils.logging.set_verbosity_info()
    logger.info('Training/evaluation parameters %s' ,__UpperCamelCase )
    # Set seed
    set_seed(training_args.seed )
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    SCREAMING_SNAKE_CASE : List[str] = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,)
    # Copy dropout/layerdrop overrides from the training args onto the config.
    SCREAMING_SNAKE_CASE : List[str] = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout')
    for p in extra_model_params:
        if getattr(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ):
            assert hasattr(__UpperCamelCase ,__UpperCamelCase ), f"({config.__class__.__name__}) doesn't have a `{p}` attribute"
            setattr(__UpperCamelCase ,__UpperCamelCase ,getattr(__UpperCamelCase ,__UpperCamelCase ) )
    SCREAMING_SNAKE_CASE : Tuple = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,)
    SCREAMING_SNAKE_CASE : List[str] = AutoModelForSeqaSeqLM.from_pretrained(
        model_args.model_name_or_path ,from_tf='.ckpt' in model_args.model_name_or_path ,config=__UpperCamelCase ,cache_dir=model_args.cache_dir ,)
    # use task specific params
    use_task_specific_params(__UpperCamelCase ,data_args.task )
    # set num_beams for evaluation
    if data_args.eval_beams is None:
        SCREAMING_SNAKE_CASE : Optional[int] = model.config.num_beams
    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(__UpperCamelCase ,(MBartTokenizer, MBartTokenizerFast) ):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(__UpperCamelCase ,__UpperCamelCase ):
            SCREAMING_SNAKE_CASE : str = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
    # Optionally freeze embeddings and/or the whole encoder before training.
    if model_args.freeze_embeds:
        freeze_embeds(__UpperCamelCase )
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder() )
        assert_all_frozen(model.get_encoder() )
    SCREAMING_SNAKE_CASE : Tuple = SeqaSeqDataset
    # Get datasets
    SCREAMING_SNAKE_CASE : str = (
        dataset_class(
            __UpperCamelCase ,type_path='train' ,data_dir=data_args.data_dir ,n_obs=data_args.n_train ,max_target_length=data_args.max_target_length ,max_source_length=data_args.max_source_length ,prefix=model.config.prefix or '' ,)
        if training_args.do_train
        else None
    )
    SCREAMING_SNAKE_CASE : Any = (
        dataset_class(
            __UpperCamelCase ,type_path='val' ,data_dir=data_args.data_dir ,n_obs=data_args.n_val ,max_target_length=data_args.val_max_target_length ,max_source_length=data_args.max_source_length ,prefix=model.config.prefix or '' ,)
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    SCREAMING_SNAKE_CASE : List[str] = (
        dataset_class(
            __UpperCamelCase ,type_path='test' ,data_dir=data_args.data_dir ,n_obs=data_args.n_test ,max_target_length=data_args.test_max_target_length ,max_source_length=data_args.max_source_length ,prefix=model.config.prefix or '' ,)
        if training_args.do_predict
        else None
    )
    # Initialize our Trainer
    SCREAMING_SNAKE_CASE : Dict = (
        build_compute_metrics_fn(data_args.task ,__UpperCamelCase ) if training_args.predict_with_generate else None
    )
    SCREAMING_SNAKE_CASE : List[Any] = SeqaSeqTrainer(
        model=__UpperCamelCase ,args=__UpperCamelCase ,data_args=__UpperCamelCase ,train_dataset=__UpperCamelCase ,eval_dataset=__UpperCamelCase ,data_collator=SeqaSeqDataCollator(
            __UpperCamelCase ,__UpperCamelCase ,model.config.decoder_start_token_id ,training_args.tpu_num_cores ) ,compute_metrics=__UpperCamelCase ,tokenizer=__UpperCamelCase ,)
    # Accumulates per-phase metrics; returned at the end and saved to JSON.
    SCREAMING_SNAKE_CASE : List[str] = {}
    # Training
    if training_args.do_train:
        logger.info('*** Train ***' )
        SCREAMING_SNAKE_CASE : Optional[Any] = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
        SCREAMING_SNAKE_CASE : Optional[Any] = train_result.metrics
        SCREAMING_SNAKE_CASE : Tuple = data_args.n_train
        trainer.save_model()  # this also saves the tokenizer
        if trainer.is_world_process_zero():
            handle_metrics('train' ,__UpperCamelCase ,training_args.output_dir )
            all_metrics.update(__UpperCamelCase )
            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir ,'trainer_state.json' ) )
            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir )
    # Evaluation
    if training_args.do_eval:
        logger.info('*** Evaluate ***' )
        SCREAMING_SNAKE_CASE : Union[str, Any] = trainer.evaluate(metric_key_prefix='val' )
        SCREAMING_SNAKE_CASE : int = data_args.n_val
        SCREAMING_SNAKE_CASE : List[Any] = round(metrics['val_loss'] ,4 )
        if trainer.is_world_process_zero():
            handle_metrics('val' ,__UpperCamelCase ,training_args.output_dir )
            all_metrics.update(__UpperCamelCase )
    if training_args.do_predict:
        logger.info('*** Predict ***' )
        SCREAMING_SNAKE_CASE : str = trainer.predict(test_dataset=__UpperCamelCase ,metric_key_prefix='test' )
        SCREAMING_SNAKE_CASE : Optional[int] = test_output.metrics
        SCREAMING_SNAKE_CASE : Tuple = data_args.n_test
        if trainer.is_world_process_zero():
            SCREAMING_SNAKE_CASE : Optional[Any] = round(metrics['test_loss'] ,4 )
            handle_metrics('test' ,__UpperCamelCase ,training_args.output_dir )
            all_metrics.update(__UpperCamelCase )
            if training_args.predict_with_generate:
                SCREAMING_SNAKE_CASE : str = tokenizer.batch_decode(
                    test_output.predictions ,skip_special_tokens=__UpperCamelCase ,clean_up_tokenization_spaces=__UpperCamelCase )
                SCREAMING_SNAKE_CASE : Union[str, Any] = lmap(str.strip ,__UpperCamelCase )
                write_txt_file(__UpperCamelCase ,os.path.join(training_args.output_dir ,'test_generations.txt' ) )
    if trainer.is_world_process_zero():
        save_json(__UpperCamelCase ,os.path.join(training_args.output_dir ,'all_results.json' ) )
    return all_metrics
def lowercase__( __UpperCamelCase: Any ):
    """Per-process entry point: receives the process index and runs training.

    NOTE(review): this looks like the xla_spawn `_mp_fn(index)` wrapper, with
    the name mangled. It calls `main()`, which is not defined under that name
    in this file (the training entry point above is also mangled) — confirm
    and restore consistent names before running.
    """
    main()


if __name__ == "__main__":
    # NOTE(review): `main` is undefined as written (see note above).
    main()
| 508 | 1 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class a__(metaclass=DummyObject):
    """Dummy placeholder that raises a helpful error when torch, transformers
    or onnx are not installed (the real class is substituted when they are).

    Fix vs. original: ``metaclass=a__`` self-referenced the class being
    defined (NameError); the metaclass is ``DummyObject`` from the import
    above. The original distinct class name was lost to obfuscation.
    """

    # Backends guarded by this dummy; DummyObject reads this attribute.
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
class a__(metaclass=DummyObject):
    """Dummy placeholder raising a helpful error when torch/transformers/onnx
    are missing. Metaclass fixed from the self-referential ``a__`` to
    ``DummyObject`` (imported above); original class name lost to obfuscation.
    """

    # Backends guarded by this dummy; DummyObject reads this attribute.
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
class a__(metaclass=DummyObject):
    """Dummy placeholder raising a helpful error when torch/transformers/onnx
    are missing. Metaclass fixed from the self-referential ``a__`` to
    ``DummyObject`` (imported above); original class name lost to obfuscation.
    """

    # Backends guarded by this dummy; DummyObject reads this attribute.
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
class a__(metaclass=DummyObject):
    """Dummy placeholder raising a helpful error when torch/transformers/onnx
    are missing. Metaclass fixed from the self-referential ``a__`` to
    ``DummyObject`` (imported above); original class name lost to obfuscation.
    """

    # Backends guarded by this dummy; DummyObject reads this attribute.
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
class a__(metaclass=DummyObject):
    """Dummy placeholder raising a helpful error when torch/transformers/onnx
    are missing. Metaclass fixed from the self-referential ``a__`` to
    ``DummyObject`` (imported above); original class name lost to obfuscation.
    """

    # Backends guarded by this dummy; DummyObject reads this attribute.
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
class a__(metaclass=DummyObject):
    """Dummy placeholder raising a helpful error when torch/transformers/onnx
    are missing. Metaclass fixed from the self-referential ``a__`` to
    ``DummyObject`` (imported above); original class name lost to obfuscation.
    """

    # Backends guarded by this dummy; DummyObject reads this attribute.
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
# Module-level logger for the CoVal metric.
# Annotation corrected: get_logger returns a logging.Logger, not an int.
lowerCAmelCase__: "logging.Logger" = datasets.logging.get_logger(__name__)
lowerCAmelCase__: Optional[int] = "\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = \"Which Coreference Evaluation Metric Do You Trust? 
A Proposal for a Link-based Entity Aware Metric\",\n author = \"Moosavi, Nafise Sadat and\n Strube, Michael\",\n booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",\n month = aug,\n year = \"2016\",\n address = \"Berlin, Germany\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/P16-1060\",\n doi = \"10.18653/v1/P16-1060\",\n pages = \"632--642\",\n}\n\n"
lowerCAmelCase__: Any = "\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. 
Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n"
lowerCAmelCase__: Tuple = "\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting 'keep_singletons=False', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. 
By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n 'mentions': mentions\n 'muc': MUC metric [Vilain et al, 1995]\n 'bcub': B-cubed [Bagga and Baldwin, 1998]\n 'ceafe': CEAFe [Luo et al., 2005]\n 'lea': LEA [Moosavi and Strube, 2016]\n 'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric('coval')\n >>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',\n ... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',\n ... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',\n ... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',\n ... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',\n ... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {'mentions/recall': 1.0,[...] 'conll_score': 100.0}\n"
def get_coref_infos(
    key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"
):
    """Parse key (reference) and system (prediction) CoNLL lines into the
    cluster / mention-assignment structures the CoVal evaluator consumes.

    Returns a dict mapping ``doc`` to
    ``(key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)``.
    """
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}
    doc_coref_infos = {}

    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0

    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num
    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)

    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num
    if NP_only or min_span:
        # Fixed: build the system parse trees from the *system* lines (the
        # previous code passed the key lines here).
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, sys_doc_lines[doc], NP_only, min_span)

    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters

        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters

    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)

    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)

    if remove_nested:
        logger.info(
            'Number of removed nested coreferring mentions in the key '
            f'annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}' )
        logger.info(
            'Number of resulting singleton clusters in the key '
            f'annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}' )
    if not keep_singletons:
        logger.info(
            f'{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system '
            'files, respectively' )
    return doc_coref_infos
def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    """Score system predictions against the reference key.

    ``metrics`` is a list of ``(name, metric_fn)`` pairs.  Returns a dict of
    per-metric recall/precision/F1 plus ``conll_score`` (average of the MUC,
    B-cubed and CEAFe F1 values, scaled to 0-100) when all three are present.
    """
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)

    output_scores = {}
    conll = 0
    conll_subparts_num = 0

    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f'{name}/recall': recall, f'{name}/precision': precision, f'{name}/f1': f1} )
        logger.info(
            name.ljust(10 ) , f'Recall: {recall * 100:.2f}' , f' Precision: {precision * 100:.2f}' , f' F1: {f1 * 100:.2f}' , )

    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f'CoNLL score: {conll:.2f}' )
        output_scores.update({'conll_score': conll} )

    return output_scores
def check_gold_parse_annotation(key_lines):
    """Return True if the first data row of the CoNLL key has a gold parse.

    Comment lines (starting with ``#``) and rows with fewer than 7 columns are
    skipped; the first qualifying row decides: column 6 equal to ``-`` means
    no gold parse annotation is present.
    """
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith('#' ):
            if len(line.split() ) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
                else:
                    break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case_ ( datasets.Metric ):
    """CoVal coreference metric (mentions, MUC, B-cubed, CEAFe, LEA, CoNLL)."""

    # Renamed from two clashing `__A` methods: `datasets.Metric` dispatches to
    # the `_info` / `_compute` hooks, and the second `__A` was shadowing the first.
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    'predictions': datasets.Sequence(datasets.Value('string' ) ),
                    'references': datasets.Sequence(datasets.Value('string' ) ),
                } ) , codebase_urls=['https://github.com/ns-moosavi/coval'] , reference_urls=[
                'https://github.com/ns-moosavi/coval',
                'https://www.aclweb.org/anthology/P16-1060',
                'http://www.conll.cemantix.org/2012/data.html',
            ] , )

    def _compute(
        self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False
    ):
        """Score ``predictions`` against ``references`` (lists of CoNLL sentences)."""
        metrics = [
            ('mentions', evaluator.mentions),
            ('muc', evaluator.muc),
            ('bcub', evaluator.b_cubed),
            ('ceafe', evaluator.ceafe),
            ('lea', evaluator.lea),
        ]

        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references )
            if not has_gold_parse:
                raise NotImplementedError('References should have gold parse annotation to use \'min_span\'.' )
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"

        score = evaluate(
            key_lines=references , sys_lines=predictions , metrics=metrics , NP_only=NP_only , remove_nested=remove_nested , keep_singletons=keep_singletons , min_span=min_span , )
        return score
| 345 | 0 |
"""simple docstring"""
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    """Compute ROUGE between a predictions file and a targets file.

    Both files hold one example per line; the target list is truncated to the
    number of predictions.  Extra kwargs are forwarded to ``calculate_rouge``.
    If ``save_path`` is given, the metrics dict is also written there as JSON.
    Renamed from ``_a`` so ``fire.Fire(calculate_rouge_path)`` below resolves.
    """
    # Context managers close the handles deterministically (the previous
    # version leaked them via bare open().readlines()).
    with open(pred_path) as pred_file:
        pred_lns = [x.strip() for x in pred_file.readlines()]
    with open(tgt_path) as tgt_file:
        tgt_lns = [x.strip() for x in tgt_file.readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
| 690 |
"""simple docstring"""
def solution(max_perimeter: int = 10**9) -> int:
    """Project Euler 94: sum of the perimeters of all "almost equilateral"
    triangles (sides a, a, a±1) with integer sides and integer area whose
    perimeter does not exceed ``max_perimeter``.

    Qualifying perimeters follow a Pell-like recurrence, so they are
    generated directly instead of searched for.  Renamed from ``_a`` so the
    ``print(f"{solution() = }")`` in the main guard resolves.
    """
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        # Advance the recurrence generating the side lengths.
        prev_value += 2 * value
        value += prev_value
        # Alternate between the (a, a, a+1) and (a, a, a-1) families.
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum
if __name__ == "__main__":
print(F'''{solution() = }''')
| 690 | 1 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'post_extract_proj': 'feature_projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.upsample.0': 'encoder.upsample.projection',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'layer_norm',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy ``value`` into the sub-module of ``hf_pointer`` addressed by the
    dotted path ``key``.

    ``weight_type`` selects which tensor of that module receives the data
    ("weight" / "weight_g" / "weight_v" / "bias", or None for the module's
    own ``.data``).  ``full_name`` is only used in messages.  Renamed from
    ``_A`` so the ``set_recursively(...)`` call site below resolves; the
    degraded version also dropped the attribute targets, making it a no-op.
    """
    for attribute in key.split("." ):
        hf_pointer = getattr(hf_pointer , attribute )

    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        F' {value.shape} for {full_name}'
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    """Walk the fairseq state dict and copy each tensor into the matching HF
    SEW module: feature-extractor convolutions go through ``load_conv_layer``,
    encoder weights are routed via the module-level ``MAPPING`` table.
    Tensors without an HF counterpart are collected and logged.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == "group" , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key
                if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # Recover the encoder layer index from the fairseq name.
                        layer_index = name.split(key )[0].split("." )[-2]
                        mapped_key = mapped_key.replace("*" , layer_index )
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )

    logger.warning(F'Unused weights: {unused_weights}' )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one fairseq feature-extractor conv tensor into the HF extractor.

    The fairseq name encodes ``<layer_id>.<type_id>``: type 0 is the conv
    itself, type 2 is the (group/layer) norm.  Unmatched tensors go to
    ``unused_weights``.  The degraded version had lost the ``...data = value``
    writes, so nothing was actually copied.
    """
    name = full_name.split("conv_layers." )[-1]
    items = name.split("." )
    layer_id = int(items[0] )
    type_id = int(items[1] )

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                F'{full_name} has size {value.shape}, but'
                F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                F'{full_name} has size {value.shape}, but'
                F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                F'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                F'{full_name} has size {value.shape}, but'
                F' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
    else:
        unused_weights.append(full_name )
def convert_config(model, is_finetuned):
    """Build a ``SEWConfig`` mirroring the architecture of a fairseq SEW
    checkpoint.

    For fine-tuned (CTC) checkpoints the backbone config lives under
    ``model.w2v_encoder.w2v_model.cfg`` and task-level dropout/SpecAugment
    settings are then overridden from the outer ``model.cfg``.  The degraded
    version had collapsed every ``config.<field> = ...`` write into a local,
    returning a default config.
    """
    config = SEWConfig()
    if is_finetuned:
        fs_config = model.w2v_encoder.w2v_model.cfg
    else:
        fs_config = model.cfg

    config.conv_bias = fs_config.conv_bias
    conv_layers = eval(fs_config.conv_feature_layers )
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = "gelu"
    config.feat_extract_norm = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1E-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers )
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor

    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
        config.activation_dropout = fs_config.activation_dropout
        config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
        config.attention_dropout = fs_config.attention_dropout
        config.feat_proj_dropout = fs_config.dropout_input
        config.hidden_dropout = fs_config.dropout
        config.mask_feature_length = fs_config.mask_channel_length
        config.mask_feature_prob = fs_config.mask_channel_prob
        config.mask_time_length = fs_config.mask_length
        config.mask_time_prob = fs_config.mask_prob

    config.feature_extractor_type = "Wav2Vec2FeatureExtractor"
    config.tokenizer_class = "Wav2Vec2CTCTokenizer"
    return config
@torch.no_grad()
def convert_sew_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """Convert a fairseq SEW checkpoint to the transformers format.

    Loads the fairseq model, derives (or loads) the ``SEWConfig``, builds the
    feature extractor / tokenizer / processor for fine-tuned CTC checkpoints,
    copies all weights across, and saves everything under
    ``pytorch_dump_folder_path``.
    """
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )

    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path )
    else:
        config = convert_config(model[0] , is_finetuned )
    model = model[0].eval()

    return_attention_mask = True if config.feat_extract_norm == "layer" else False
    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=True , return_attention_mask=return_attention_mask , )

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path )

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols )
            vocab_path = os.path.join(pytorch_dump_folder_path , "vocab.json" )
            if not os.path.isdir(pytorch_dump_folder_path ):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path ) )
                return
            os.makedirs(pytorch_dump_folder_path , exist_ok=True )
            with open(vocab_path , "w" , encoding="utf-8" ) as vocab_handle:
                json.dump(target_dict.indices , vocab_handle )
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=False , )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor , tokenizer=tokenizer )
            processor.save_pretrained(pytorch_dump_folder_path )

        hf_model = SEWForCTC(config )
    else:
        hf_model = SEWModel(config )

    feature_extractor.save_pretrained(pytorch_dump_folder_path )
    recursively_load_weights(model , hf_model , is_finetuned )
    hf_model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--is_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
UpperCamelCase = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
| 61 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class FlaxDistilBertModelTester(unittest.TestCase):
    """Builds small DistilBert configs and inputs for the model tests below.

    Renamed from ``_UpperCamelCase`` so the ``FlaxDistilBertModelTester(self)``
    reference in the test class resolves; the degraded ``__init__`` also had
    duplicate parameter names (a SyntaxError) and had lost the ``self.``
    attribute writes.
    """

    def __init__(
        self,
        parent,
        batch_size=1_3,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=9_9,
        hidden_size=3_2,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=3_7,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_1_2,
        type_vocab_size=1_6,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        """Create a tiny config plus random input ids / attention mask."""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )

        config = DistilBertConfig(
            vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=True , )

        return config, input_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        """Adapt ``prepare_config_and_inputs`` to the common-tester interface."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxDistilBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """Standard Flax model-tester suite for DistilBert.

    Fixes from the degraded version: the mixin base was the undefined name
    ``SCREAMING_SNAKE_CASE`` (``FlaxModelTesterMixin`` is what this file
    imports), the class attribute must be ``all_model_classes`` for the mixin
    to see it, the duplicated ``FlaxDistilBertForQuestionAnswering`` entry is
    removed, and ``setUp`` now actually stores the tester on ``self``.
    """

    all_model_classes = (
        (
            FlaxDistilBertModel,
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxDistilBertModelTester(self )

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("distilbert-base-uncased" )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
@require_flax
class FlaxDistilBertModelIntegrationTest(unittest.TestCase):
    """Slow integration check against the pretrained distilbert-base-uncased."""

    @slow
    def test_inference_no_head(self):
        model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased" )
        input_ids = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = (1, 1_1, 7_6_8)
        self.assertEqual(output.shape , expected_shape )
        # Reference values recorded from the original (PyTorch) model.
        expected_slice = np.array([[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]] )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1e-4 ) )
| 372 | 0 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-import scaffolding for the EfficientNet sub-package.  The degraded
# version kept rebinding one name (`A_`) and then passed the undefined
# `_import_structure` to `_LazyModule`, raising NameError on import; the
# standard keyed-dict structure is restored here.
_import_structure = {
    "configuration_efficientnet": [
        "EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EfficientNetConfig",
        "EfficientNetOnnxConfig",
    ]
}

# Image processor is only exposed when vision deps are installed.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_efficientnet"] = ["EfficientNetImageProcessor"]

# Modeling classes require torch.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_efficientnet"] = [
        "EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EfficientNetForImageClassification",
        "EfficientNetModel",
        "EfficientNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports...
    from .configuration_efficientnet import (
        EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EfficientNetConfig,
        EfficientNetOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_efficientnet import EfficientNetImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_efficientnet import (
            EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            EfficientNetForImageClassification,
            EfficientNetModel,
            EfficientNetPreTrainedModel,
        )

else:
    # ...while at runtime the module is replaced by a lazy proxy.
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 616 |
"""simple docstring"""
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
A_ : Tuple = logging.getLogger(__name__)
A_ : Tuple = "Hello world! cécé herlolip"
A_ : Dict = namedtuple(
"BertAbsConfig",
[
"temp_dir",
"large",
"use_bert_emb",
"finetune_bert",
"encoder",
"share_emb",
"max_pos",
"enc_layers",
"enc_hidden_size",
"enc_heads",
"enc_ff_size",
"enc_dropout",
"dec_layers",
"dec_hidden_size",
"dec_heads",
"dec_ff_size",
"dec_dropout",
],
)
def convert_bertabs_checkpoints(path_to_checkpoints, dump_path):
    """Copy the authors' pre-trained BertAbs weights into the internal
    architecture and sanity-check that both models produce identical outputs.

    Renamed from ``A`` so the ``convert_bertabs_checkpoints(...)`` call in the
    main guard resolves.
    NOTE(review): ``dump_path`` is currently unused — the state dict is saved
    to a hard-coded relative path at the bottom; verify against the original
    script before changing.
    """
    # Instantiate the authors' model with the pre-trained weights
    config = BertAbsConfig(
        temp_dir=""".""" , finetune_bert=False , large=False , share_emb=True , use_bert_emb=False , encoder="""bert""" , max_pos=5_12 , enc_layers=6 , enc_hidden_size=5_12 , enc_heads=8 , enc_ff_size=5_12 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=7_68 , dec_heads=8 , dec_ff_size=20_48 , dec_dropout=0.2 , )
    checkpoints = torch.load(path_to_checkpoints , lambda storage , loc : storage )
    original = AbsSummarizer(config , torch.device("""cpu""" ) , checkpoints )
    original.eval()

    new_model = BertAbsSummarizer(config , torch.device("""cpu""" ) )
    new_model.eval()

    # -------------------
    # Convert the weights
    # -------------------

    logging.info("""convert the model""" )
    new_model.bert.load_state_dict(original.bert.state_dict() )
    new_model.decoder.load_state_dict(original.decoder.state_dict() )
    new_model.generator.load_state_dict(original.generator.state_dict() )

    # ----------------------------------
    # Make sure the outpus are identical
    # ----------------------------------

    logging.info("""Make sure that the models' outputs are identical""" )
    tokenizer = BertTokenizer.from_pretrained("""bert-base-uncased""" )

    # prepare the model inputs
    encoder_input_ids = tokenizer.encode("""This is sample éàalj'-.""" )
    encoder_input_ids.extend([tokenizer.pad_token_id] * (5_12 - len(encoder_input_ids )) )
    encoder_input_ids = torch.tensor(encoder_input_ids ).unsqueeze(0 )
    decoder_input_ids = tokenizer.encode("""This is sample 3 éàalj'-.""" )
    decoder_input_ids.extend([tokenizer.pad_token_id] * (5_12 - len(decoder_input_ids )) )
    decoder_input_ids = torch.tensor(decoder_input_ids ).unsqueeze(0 )

    # failsafe to make sure the weights reset does not affect the
    # loaded weights.
    assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0

    # forward pass
    src = encoder_input_ids
    tgt = decoder_input_ids
    segs = token_type_ids = None
    clss = None
    mask_src = encoder_attention_mask = None
    mask_tgt = decoder_attention_mask = None
    mask_cls = None

    # The original model does not apply the geneator layer immediatly but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
    # We make sure that the outputs of the full stack are identical
    # NOTE(review): argument order assumes AbsSummarizer.forward(src, tgt, segs,
    # clss, mask_src, mask_tgt, mask_cls) — confirm against the authors' code.
    output_original_model = original(src , tgt , segs , clss , mask_src , mask_tgt , mask_cls )[0]
    output_original_generator = original.generator(output_original_model )
    output_converted_model = new_model(
        encoder_input_ids , decoder_input_ids , token_type_ids , encoder_attention_mask , decoder_attention_mask )[0]
    output_converted_generator = new_model.generator(output_converted_model )

    maximum_absolute_difference = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
    print("""Maximum absolute difference beween weights: {:.2f}""".format(maximum_absolute_difference ) )
    maximum_absolute_difference = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
    print("""Maximum absolute difference beween weights: {:.2f}""".format(maximum_absolute_difference ) )

    are_identical = torch.allclose(output_converted_model , output_original_model , atol=1e-3 )
    if are_identical:
        logging.info("""all weights are equal up to 1e-3""" )
    else:
        raise ValueError("""the weights are different. The new model is likely different from the original one.""" )

    # The model has been saved with torch.save(model) and this is bound to the exact
    # directory structure. We save the state_dict instead.
    logging.info("""saving the model's state dictionary""" )
    torch.save(
        new_model.state_dict() , """./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin""" )
if __name__ == "__main__":
A_ : Dict = argparse.ArgumentParser()
parser.add_argument(
"--bertabs_checkpoint_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch dump.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the output PyTorch model.",
)
A_ : Tuple = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
| 616 | 1 |
'''simple docstring'''
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase = get_tests_dir("""fixtures/test_sentencepiece_bpe.model""")
class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer test suite for BARTpho.

    Fixes from the degraded version: the mixin base was the undefined name
    ``UpperCAmelCase`` (``TokenizerTesterMixin`` is what this file imports);
    all four methods were named ``UpperCamelCase`` so ``setUp`` never ran and
    no test was discovered; the ``self.*`` fixture attributes were lost.
    """

    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # Build a tiny monolingual vocab file alongside the sample
        # sentencepiece model (module-level ``lowerCAmelCase`` path above).
        vocab = ['▁This', '▁is', '▁a', '▁t', 'est']
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        self.special_tokens_map = {'unk_token': '<unk>'}

        self.monolingual_vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['monolingual_vocab_file'] )
        with open(self.monolingual_vocab_file , 'w' , encoding='utf-8' ) as fp:
            for token in vocab_tokens:
                fp.write(F'{token} {vocab_tokens[token]}\n' )

        tokenizer = BartphoTokenizer(lowerCAmelCase , self.monolingual_vocab_file , **self.special_tokens_map )
        tokenizer.save_pretrained(self.tmpdirname )

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map )
        return BartphoTokenizer.from_pretrained(self.tmpdirname , **kwargs )

    def get_input_output_texts(self, tokenizer):
        input_text = 'This is a là test'
        output_text = 'This is a<unk><unk> test'
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BartphoTokenizer(lowerCAmelCase , self.monolingual_vocab_file , **self.special_tokens_map )
        text = 'This is a là test'
        bpe_tokens = '▁This ▁is ▁a ▁l à ▁t est'.split()
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
| 292 |
'''simple docstring'''
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    """Return a normalized escape-time "distance" for the point (x, y).

    Iterates z -> z**2 + c with c = x + yi for up to ``max_step`` steps and
    returns ``step / (max_step - 1)``: 1.0 means the orbit never left the
    radius-2 disk (the point is treated as inside the Mandelbrot set),
    values below 1.0 encode how quickly it diverged.  Renamed from the
    clashing ``lowerCamelCase_`` so the call in ``get_image`` resolves.
    """
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new

        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
def get_black_and_white_rgb(distance: float) -> tuple:
    """Map an escape-time distance to black/white: points inside the set
    (distance == 1) are black, everything else white.  Renamed from the
    clashing ``lowerCamelCase_``; the degraded parameter also never matched
    the ``distance`` the body reads.
    """
    if distance == 1:
        return (0, 0, 0)
    else:
        return (2_5_5, 2_5_5, 2_5_5)
def get_color_coded_rgb(distance: float) -> tuple:
    """Map an escape-time distance to a color: points inside the set
    (distance == 1) are black, everything else gets a fully-saturated hue
    proportional to the distance (via HSV -> RGB).  Renamed from the
    clashing ``lowerCamelCase_``; the degraded parameter never matched the
    ``distance`` the body reads.
    """
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 2_5_5 ) for i in colorsys.hsv_to_rgb(distance , 1 , 1 ) )
def get_image(
    image_width: int = 8_0_0,
    image_height: int = 6_0_0,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 5_0,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    """Render the Mandelbrot set into a PIL image.

    The figure window is centered on (figure_center_x, figure_center_y) with
    the given width (height follows the image aspect ratio).  Renamed from
    the clashing ``lowerCamelCase_``; the degraded version had also dropped
    the ``pixels[...] = ...`` writes, producing an all-black image.
    """
    img = Image.new('RGB' , (image_width, image_height) )
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width ):
        for image_y in range(image_height ):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x , figure_y , max_step )

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance )
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance )
    return img
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # colored version, full figure
    img = get_image()

    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
    # figure_width = 0.8)

    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding = False)

    # uncomment to save the image
    # img.save("mandelbrot.png")

    img.show()
| 292 | 1 |
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
# Module-level logger plus the docstring constants consumed by the
# @add_code_sample_docstrings decorators further down in this file.
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "MobileNetV1Config"

# Base docstring
_CHECKPOINT_FOR_DOC = "google/mobilenet_v1_1.0_224"
_EXPECTED_OUTPUT_SHAPE = [1, 1_024, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v1_1.0_224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/mobilenet_v1_1.0_224",
    "google/mobilenet_v1_0.75_192",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def lowercase ( a__ : int , a__ : List[Any] , a__ : str=None ) -> Union[str, Any]:
    # Intended purpose: build a dict mapping TensorFlow checkpoint variable
    # names to the matching PyTorch parameters of a MobileNetV1 model
    # (conv stem, 13 depthwise/pointwise layer pairs, optional classifier head).
    # NOTE(review): this block appears machine-mangled — the signature repeats
    # the parameter name `a__` (a SyntaxError) and every assignment target was
    # collapsed to `_UpperCamelCase`, so the names read below (`model`,
    # `backbone`, `prefix`, `pointer`, `tf_index`, `pt_index`, `tf_to_pt_map`)
    # are never bound. The original identifiers (and the isinstance target,
    # presumably the image-classification class) must be restored — TODO
    # confirm against upstream transformers' modeling_mobilenet_v1.py.
    _UpperCamelCase = {}
    if isinstance(a__ , a__ ):
        _UpperCamelCase = model.mobilenet_va
    else:
        _UpperCamelCase = model
    _UpperCamelCase = '''MobilenetV1/Conv2d_0/'''
    # conv stem: convolution kernel plus batch-norm beta/gamma and running stats
    _UpperCamelCase = backbone.conv_stem.convolution.weight
    _UpperCamelCase = backbone.conv_stem.normalization.bias
    _UpperCamelCase = backbone.conv_stem.normalization.weight
    _UpperCamelCase = backbone.conv_stem.normalization.running_mean
    _UpperCamelCase = backbone.conv_stem.normalization.running_var
    for i in range(13 ):
        # TF numbers the blocks 1..13; each TF block maps to a PyTorch
        # (depthwise, pointwise) pair at indices 2*i and 2*i + 1.
        _UpperCamelCase = i + 1
        _UpperCamelCase = i * 2
        _UpperCamelCase = backbone.layer[pt_index]
        _UpperCamelCase = F'''MobilenetV1/Conv2d_{tf_index}_depthwise/'''
        _UpperCamelCase = pointer.convolution.weight
        _UpperCamelCase = pointer.normalization.bias
        _UpperCamelCase = pointer.normalization.weight
        _UpperCamelCase = pointer.normalization.running_mean
        _UpperCamelCase = pointer.normalization.running_var
        _UpperCamelCase = backbone.layer[pt_index + 1]
        _UpperCamelCase = F'''MobilenetV1/Conv2d_{tf_index}_pointwise/'''
        _UpperCamelCase = pointer.convolution.weight
        _UpperCamelCase = pointer.normalization.bias
        _UpperCamelCase = pointer.normalization.weight
        _UpperCamelCase = pointer.normalization.running_mean
        _UpperCamelCase = pointer.normalization.running_var
    if isinstance(a__ , a__ ):
        # classification head (1x1 conv logits in the TF checkpoint)
        _UpperCamelCase = '''MobilenetV1/Logits/Conv2d_1c_1x1/'''
        _UpperCamelCase = model.classifier.weight
        _UpperCamelCase = model.classifier.bias
    return tf_to_pt_map
def load_tf_weights_in_mobilenet_va(model, config, tf_checkpoint_path):
    """Load weights from a TensorFlow MobileNetV1 checkpoint into *model*.

    Args:
        model: the PyTorch MobileNetV1 model (with or without a classification head).
        config: the model configuration (forwarded to the name-map builder).
        tf_checkpoint_path: path to the TF checkpoint to read.

    Returns:
        The same *model* instance with its parameters overwritten in place.

    Raises:
        ImportError: if TensorFlow is not installed.
        ValueError: if a checkpoint tensor's shape does not match its target.
    """
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            '''Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see '''
            '''https://www.tensorflow.org/install/ for installation instructions.'''
        )
        raise

    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f'''Loading TF weight {name} with shape {shape}''')
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info(f'''Importing {name}''')
        if name not in tf_weights:
            logger.info(f'''{name} not in tf pre-trained weights, skipping''')
            continue

        array = tf_weights[name]

        if "depthwise_weights" in name:
            logger.info('''Transposing depthwise''')
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info('''Transposing''')
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))

        if pointer.shape != array.shape:
            raise ValueError(f'''Pointer shape {pointer.shape} and array shape {array.shape} mismatched''')

        logger.info(f'''Initialize PyTorch weight {name} {array.shape}''')
        pointer.data = torch.from_numpy(array)

        # Drop the consumed weight and its optimizer/EMA companions so the
        # final report only lists genuinely unused checkpoint entries.
        tf_weights.pop(name, None)
        tf_weights.pop(name + '''/RMSProp''', None)
        tf_weights.pop(name + '''/RMSProp_1''', None)
        tf_weights.pop(name + '''/ExponentialMovingAverage''', None)

    logger.info(f'''Weights not copied to PyTorch model: {", ".join(tf_weights.keys() )}''')
    return model
def apply_tf_padding(features: torch.Tensor, conv_layer: nn.Conv2d) -> torch.Tensor:
    """Pad *features* the way TensorFlow's "SAME" convolution padding would,
    given the stride and kernel size of *conv_layer*.

    TF pads only as much as needed for the output size ceil(in/stride), and
    puts the extra pixel (when the total padding is odd) on the bottom/right —
    unlike PyTorch's fixed symmetric padding.
    """
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)

    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)

    # split the totals, extra pixel (if any) goes right/bottom as in TF
    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, '''constant''', 0.0)
class UpperCAmelCase_ ( nn.Module):
    # One MobileNetV1 "conv block": Conv2d + optional BatchNorm2d + optional
    # activation.
    # NOTE(review): this class looks machine-mangled — the __init__ signature
    # repeats the parameter name `__UpperCamelCase` (a SyntaxError) and all
    # assignment targets were collapsed to `_UpperCamelCase`, so the attributes
    # read in forward() (`self.config`, `self.convolution`,
    # `self.normalization`, `self.activation`) are never set. The original
    # parameter/attribute names must be restored before this can run.
    def __init__( self : str , __UpperCamelCase : MobileNetVaConfig , __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : Optional[int] = 1 , __UpperCamelCase : Optional[int] = 1 , __UpperCamelCase : bool = False , __UpperCamelCase : Optional[bool] = True , __UpperCamelCase : Optional[bool or str] = True , ) -> None:
        super().__init__()
        _UpperCamelCase = config

        # grouped convolutions require channel counts divisible by the groups
        if in_channels % groups != 0:
            raise ValueError(F'''Input channels ({in_channels}) are not divisible by {groups} groups.''' )
        if out_channels % groups != 0:
            raise ValueError(F'''Output channels ({out_channels}) are not divisible by {groups} groups.''' )

        # TF-style "SAME" padding is applied dynamically in forward(); otherwise
        # use fixed symmetric padding derived from the kernel size.
        _UpperCamelCase = 0 if config.tf_padding else int((kernel_size - 1) / 2 )

        _UpperCamelCase = nn.Convad(
            in_channels=__UpperCamelCase , out_channels=__UpperCamelCase , kernel_size=__UpperCamelCase , stride=__UpperCamelCase , padding=__UpperCamelCase , groups=__UpperCamelCase , bias=__UpperCamelCase , padding_mode='''zeros''' , )

        if use_normalization:
            _UpperCamelCase = nn.BatchNormad(
                num_features=__UpperCamelCase , eps=config.layer_norm_eps , momentum=0.9_9_9_7 , affine=__UpperCamelCase , track_running_stats=__UpperCamelCase , )
        else:
            _UpperCamelCase = None

        if use_activation:
            # resolve the activation: explicit string argument wins, then the
            # config-level default, else assume a callable was passed directly
            if isinstance(__UpperCamelCase , __UpperCamelCase ):
                _UpperCamelCase = ACTaFN[use_activation]
            elif isinstance(config.hidden_act , __UpperCamelCase ):
                _UpperCamelCase = ACTaFN[config.hidden_act]
            else:
                _UpperCamelCase = config.hidden_act
        else:
            _UpperCamelCase = None

    def _UpperCamelCase ( self : List[Any] , __UpperCamelCase : torch.Tensor ) -> torch.Tensor:
        # forward pass: optional TF padding -> convolution -> norm -> activation
        if self.config.tf_padding:
            _UpperCamelCase = apply_tf_padding(__UpperCamelCase , self.convolution )
        _UpperCamelCase = self.convolution(__UpperCamelCase )
        if self.normalization is not None:
            _UpperCamelCase = self.normalization(__UpperCamelCase )
        if self.activation is not None:
            _UpperCamelCase = self.activation(__UpperCamelCase )
        return features
class UpperCAmelCase_ ( _lowercase):
    # Base class wiring MobileNetV1 into the transformers PreTrainedModel
    # machinery: config class, TF-weight loader, base-model prefix, input name.
    # NOTE(review): the five class attributes below were all collapsed to the
    # name `snake_case__`, so only the last assignment survives — presumably
    # these were `config_class`, `load_tf_weights`, `base_model_prefix`,
    # `main_input_name` and `supports_gradient_checkpointing`; TODO restore.
    snake_case__ = MobileNetVaConfig
    snake_case__ = load_tf_weights_in_mobilenet_va
    snake_case__ = '''mobilenet_v1'''
    snake_case__ = '''pixel_values'''
    snake_case__ = False

    def _UpperCamelCase ( self : Any , __UpperCamelCase : Union[nn.Linear, nn.Convad] ) -> None:
        # default weight init: normal(0, initializer_range) for linear/conv
        # weights with zeroed bias; (bias=0, weight=1) for batch norm
        if isinstance(__UpperCamelCase , (nn.Linear, nn.Convad) ):
            module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(__UpperCamelCase , nn.BatchNormad ):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0 )
# Documentation templates consumed by the @add_start_docstrings /
# @add_start_docstrings_to_model_forward decorators on the model classes below.
MOBILENET_V1_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

MOBILENET_V1_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`MobileNetV1ImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    '''The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.''' , _lowercase , )
class UpperCAmelCase_ ( _lowercase):
    # MobileNetV1 backbone: a stride-2 conv stem followed by 13
    # (depthwise, pointwise) conv-layer pairs, with an optional global average
    # pool producing the pooled output.
    # NOTE(review): identifiers look machine-mangled — locals are collapsed to
    # `_UpperCamelCase` (so names like `depth`, `out_channels`, `strides`,
    # `self.layer`, `self.pooler`, `hidden_states` read below are never bound)
    # and the decorator arguments reference the undefined `__UpperCamelCase`.
    # The original attribute/local names must be restored before this runs.
    def __init__( self : Optional[int] , __UpperCamelCase : MobileNetVaConfig , __UpperCamelCase : bool = True ) -> str:
        super().__init__(__UpperCamelCase )
        _UpperCamelCase = config
        # base channel count before applying the depth multiplier
        _UpperCamelCase = 32
        _UpperCamelCase = max(int(depth * config.depth_multiplier ) , config.min_depth )
        _UpperCamelCase = MobileNetVaConvLayer(
            __UpperCamelCase , in_channels=config.num_channels , out_channels=__UpperCamelCase , kernel_size=3 , stride=2 , )
        # stride schedule for the 13 depthwise convolutions
        _UpperCamelCase = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
        _UpperCamelCase = nn.ModuleList()
        for i in range(13 ):
            _UpperCamelCase = out_channels
            # channel count doubles at each downsampling stage (and once up front)
            if strides[i] == 2 or i == 0:
                depth *= 2
                _UpperCamelCase = max(int(depth * config.depth_multiplier ) , config.min_depth )
            self.layer.append(
                MobileNetVaConvLayer(
                    __UpperCamelCase , in_channels=__UpperCamelCase , out_channels=__UpperCamelCase , kernel_size=3 , stride=strides[i] , groups=__UpperCamelCase , ) )
            self.layer.append(
                MobileNetVaConvLayer(
                    __UpperCamelCase , in_channels=__UpperCamelCase , out_channels=__UpperCamelCase , kernel_size=1 , ) )
        _UpperCamelCase = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None
        # Initialize weights and apply final processing
        self.post_init()

    def _UpperCamelCase ( self : int , __UpperCamelCase : Union[str, Any] ) -> Optional[int]:
        # head pruning is not supported for this architecture
        raise NotImplementedError

    @add_start_docstrings_to_model_forward(__UpperCamelCase )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=__UpperCamelCase , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def _UpperCamelCase ( self : Optional[int] , __UpperCamelCase : Optional[torch.Tensor] = None , __UpperCamelCase : Optional[bool] = None , __UpperCamelCase : Optional[bool] = None , ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
        # forward: conv stem -> 26 conv layers -> optional pooling;
        # collect per-layer hidden states when requested
        _UpperCamelCase = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        _UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError('''You have to specify pixel_values''' )

        _UpperCamelCase = self.conv_stem(__UpperCamelCase )

        _UpperCamelCase = () if output_hidden_states else None

        for i, layer_module in enumerate(self.layer ):
            _UpperCamelCase = layer_module(__UpperCamelCase )

            if output_hidden_states:
                _UpperCamelCase = all_hidden_states + (hidden_states,)

        _UpperCamelCase = hidden_states

        if self.pooler is not None:
            # flatten (B, C, 1, 1) pooled features to (B, C)
            _UpperCamelCase = torch.flatten(self.pooler(__UpperCamelCase ) , start_dim=1 )
        else:
            _UpperCamelCase = None

        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=__UpperCamelCase , pooler_output=__UpperCamelCase , hidden_states=__UpperCamelCase , )
@add_start_docstrings(
    '''
    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    ''' , _lowercase , )
class UpperCAmelCase_ ( _lowercase):
    # Classification wrapper: backbone + dropout + linear head, with the
    # standard transformers regression / single-label / multi-label loss
    # selection driven by config.problem_type.
    # NOTE(review): identifiers look machine-mangled — locals collapsed to
    # `_UpperCamelCase`, so names read below (`self.num_labels`,
    # `self.mobilenet_va`, `self.dropout`, `self.classifier`, `outputs`,
    # `logits`, `loss`, `output`) are never bound; restore before running.
    def __init__( self : int , __UpperCamelCase : MobileNetVaConfig ) -> None:
        super().__init__(__UpperCamelCase )
        _UpperCamelCase = config.num_labels
        _UpperCamelCase = MobileNetVaModel(__UpperCamelCase )
        # classifier input width = channel count of the last pointwise conv
        _UpperCamelCase = self.mobilenet_va.layer[-1].convolution.out_channels

        # Classifier head
        _UpperCamelCase = nn.Dropout(config.classifier_dropout_prob , inplace=__UpperCamelCase )
        _UpperCamelCase = nn.Linear(__UpperCamelCase , config.num_labels ) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(__UpperCamelCase )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__UpperCamelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def _UpperCamelCase ( self : str , __UpperCamelCase : Optional[torch.Tensor] = None , __UpperCamelCase : Optional[bool] = None , __UpperCamelCase : Optional[torch.Tensor] = None , __UpperCamelCase : Optional[bool] = None , ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
        _UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
        _UpperCamelCase = self.mobilenet_va(__UpperCamelCase , output_hidden_states=__UpperCamelCase , return_dict=__UpperCamelCase )

        _UpperCamelCase = outputs.pooler_output if return_dict else outputs[1]

        _UpperCamelCase = self.classifier(self.dropout(__UpperCamelCase ) )

        _UpperCamelCase = None
        if labels is not None:
            # infer the problem type once from num_labels and label dtype,
            # then pick the matching loss function
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    _UpperCamelCase = '''regression'''
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    _UpperCamelCase = '''single_label_classification'''
                else:
                    _UpperCamelCase = '''multi_label_classification'''

            if self.config.problem_type == "regression":
                _UpperCamelCase = MSELoss()
                if self.num_labels == 1:
                    _UpperCamelCase = loss_fct(logits.squeeze() , labels.squeeze() )
                else:
                    _UpperCamelCase = loss_fct(__UpperCamelCase , __UpperCamelCase )
            elif self.config.problem_type == "single_label_classification":
                _UpperCamelCase = CrossEntropyLoss()
                _UpperCamelCase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            elif self.config.problem_type == "multi_label_classification":
                _UpperCamelCase = BCEWithLogitsLoss()
                _UpperCamelCase = loss_fct(__UpperCamelCase , __UpperCamelCase )

        if not return_dict:
            _UpperCamelCase = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(
            loss=__UpperCamelCase , logits=__UpperCamelCase , hidden_states=outputs.hidden_states , )
| 707 | """simple docstring"""
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray) -> bool:
    """Return True iff *matrix* equals its own conjugate transpose."""
    return np.array_equal(matrix, matrix.conjugate().T)
def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Return the Rayleigh quotient (v* A v) / (v* v) of Hermitian matrix *a*
    and non-zero column vector *v*.

    For a Hermitian matrix the quotient is always real and bounded by the
    smallest and largest eigenvalues.
    """
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))
def tests() -> None:
    """Sanity-check is_hermitian and rayleigh_quotient on a complex and a real
    Hermitian matrix."""
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    # v is an eigenvector of this matrix with eigenvalue 3
    assert rayleigh_quotient(a, v) == float(3)
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # also run the hand-written sanity checks defined above
    tests()
| 342 | 0 |
from queue import PriorityQueue
from typing import Any
import numpy as np
def _SCREAMING_SNAKE_CASE ( lowercase : dict , lowercase : str , lowercase : set , lowercase : set , lowercase : dict , lowercase : dict , lowercase : PriorityQueue , lowercase : dict , lowercase : float | int , ):
'''simple docstring'''
for nxt, d in graph[v]:
if nxt in visited_forward:
continue
lowerCamelCase_ = cst_fwd.get(lowercase , np.inf )
lowerCamelCase_ = cst_fwd[v] + d
if new_cost_f < old_cost_f:
queue.put((new_cost_f, nxt) )
lowerCamelCase_ = new_cost_f
lowerCamelCase_ = v
if nxt in visited_backward:
if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
lowerCamelCase_ = cst_fwd[v] + d + cst_bwd[nxt]
return shortest_distance
def bidirectional_dij(
    source: str, destination: str, graph_forward: dict, graph_backward: dict
) -> int:
    """Bi-directional Dijkstra: expand simultaneously from *source* over
    graph_forward and from *destination* over graph_backward until the two
    frontiers prove the best meeting distance optimal.

    Returns the shortest-path distance, or -1 if *destination* is unreachable.
    """
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward = PriorityQueue()
    queue_backward = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        # settle the cheapest node on each frontier
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )
        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )

        # once the two frontiers together cannot beat the best meeting point,
        # the current answer is final
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
# Example graphs: graph_fwd holds outgoing edges, graph_bwd the same edges
# reversed (each value is a list of [neighbour, weight] pairs).
graph_fwd = {
    "B": [["C", 1]],
    "C": [["D", 1]],
    "D": [["F", 1]],
    "E": [["B", 1], ["G", 2]],
    "F": [],
    "G": [["F", 1]],
}
graph_bwd = {
    "B": [["E", 1]],
    "C": [["B", 1]],
    "D": [["C", 1]],
    "F": [["D", 1], ["G", 1]],
    "E": [[None, np.inf]],
    "G": [["E", 2]],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 70 |
from __future__ import annotations
from typing import Any
class ContainsLoopError(Exception):
    """Raised when iterating a linked list that contains a cycle."""

    pass
class Node:
    """A singly linked list node able to detect cycles while iterating."""

    def __init__(self, data: Any) -> None:
        self.data = data  # payload stored in this node
        self.next_node: Node | None = None  # following node in the chain, if any

    def __iter__(self) -> Any:
        """Yield the data of each node in turn; raise ContainsLoopError as soon
        as a node is visited twice (i.e. the chain contains a cycle)."""
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node

    @property
    def has_loop(self) -> bool:
        """True iff following next_node pointers eventually revisits a node."""
        try:
            list(self)
            return False
        except ContainsLoopError:
            return True
if __name__ == "__main__":
    # chain 1 -> 2 -> 3 -> 4: no loop
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop)  # False
    # point the tail back at the second node: loop
    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop)  # True

    # duplicate *values* (5, 6, 5, 6) are not a loop — identity matters
    root_node = Node(5)
    root_node.next_node = Node(6)
    root_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop)  # False

    root_node = Node(1)
    print(root_node.has_loop)  # False
| 70 | 1 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class lowerCAmelCase_ ( unittest.TestCase ):
    """Tests for AlignProcessor: saving/loading round-trips and equivalence of
    the processor with its underlying tokenizer and image processor.

    NOTE(review): this class looks machine-mangled — every method is named
    `snake_case` (each definition shadows the previous one, so only the last
    method survives) and locals were collapsed to `snake_case`, leaving later
    references (`self.tmpdirname`, `vocab_tokens`, `tokenizer`, `processor`,
    ...) unbound; the original method and variable names must be restored.
    """
    def snake_case ( self ):
        """Create a temp dir holding a toy BERT vocab file and an
        EfficientNet image-processor config (setUp)."""
        snake_case = tempfile.mkdtemp()
        snake_case = [
            '[UNK]',
            '[CLS]',
            '[SEP]',
            '[PAD]',
            '[MASK]',
            'want',
            '##want',
            '##ed',
            'wa',
            'un',
            'runn',
            '##ing',
            ',',
            'low',
            'lowest',
        ]
        snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )

        snake_case = {
            'do_resize': True,
            'size': 20,
            'do_center_crop': True,
            'crop_size': 18,
            'do_normalize': True,
            'image_mean': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
            'image_std': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
        }
        snake_case = os.path.join(self.tmpdirname , lowerCAmelCase )
        with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
            json.dump(lowerCAmelCase , lowerCAmelCase )

    def snake_case ( self , **lowerCAmelCase ):
        """Build a slow BertTokenizer from the temp-dir fixture."""
        return BertTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase )

    def snake_case ( self , **lowerCAmelCase ):
        """Build a fast BertTokenizerFast from the temp-dir fixture."""
        return BertTokenizerFast.from_pretrained(self.tmpdirname , **lowerCAmelCase )

    def snake_case ( self , **lowerCAmelCase ):
        """Build an EfficientNetImageProcessor from the temp-dir fixture."""
        return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase )

    def snake_case ( self ):
        """Remove the temp-dir fixture (tearDown)."""
        shutil.rmtree(self.tmpdirname )

    def snake_case ( self ):
        """Return a list with one random PIL image for processor inputs."""
        snake_case = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
        snake_case = [Image.fromarray(np.moveaxis(lowerCAmelCase , 0 , -1 ) ) for x in image_inputs]
        return image_inputs

    def snake_case ( self ):
        """save_pretrained/from_pretrained round-trips for processors built
        with slow and fast tokenizers preserve both components."""
        snake_case = self.get_tokenizer()
        snake_case = self.get_rust_tokenizer()
        snake_case = self.get_image_processor()

        snake_case = AlignProcessor(tokenizer=lowerCAmelCase , image_processor=lowerCAmelCase )
        processor_slow.save_pretrained(self.tmpdirname )
        snake_case = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=lowerCAmelCase )
        snake_case = AlignProcessor(tokenizer=lowerCAmelCase , image_processor=lowerCAmelCase )
        processor_fast.save_pretrained(self.tmpdirname )
        snake_case = AlignProcessor.from_pretrained(self.tmpdirname )

        self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
        self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer , lowerCAmelCase )
        self.assertIsInstance(processor_fast.tokenizer , lowerCAmelCase )

        self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor_slow.image_processor , lowerCAmelCase )
        self.assertIsInstance(processor_fast.image_processor , lowerCAmelCase )

    def snake_case ( self ):
        """from_pretrained honors extra kwargs (special tokens, image-processor
        overrides) after a save_pretrained round-trip."""
        snake_case = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )

        snake_case = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
        snake_case = self.get_image_processor(do_normalize=lowerCAmelCase , padding_value=1.0 )

        snake_case = AlignProcessor.from_pretrained(
            self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=lowerCAmelCase , padding_value=1.0 )

        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , lowerCAmelCase )

        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , lowerCAmelCase )

    def snake_case ( self ):
        """processor(images=...) matches the bare image processor's output."""
        snake_case = self.get_image_processor()
        snake_case = self.get_tokenizer()

        snake_case = AlignProcessor(tokenizer=lowerCAmelCase , image_processor=lowerCAmelCase )

        snake_case = self.prepare_image_inputs()

        snake_case = image_processor(lowerCAmelCase , return_tensors='np' )
        snake_case = processor(images=lowerCAmelCase , return_tensors='np' )

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )

    def snake_case ( self ):
        """processor(text=...) matches the bare tokenizer's padded output."""
        snake_case = self.get_image_processor()
        snake_case = self.get_tokenizer()

        snake_case = AlignProcessor(tokenizer=lowerCAmelCase , image_processor=lowerCAmelCase )

        snake_case = 'lower newer'

        snake_case = processor(text=lowerCAmelCase )

        snake_case = tokenizer(lowerCAmelCase , padding='max_length' , max_length=64 )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )

    def snake_case ( self ):
        """processor(text, images) yields the combined keys; calling with no
        input raises."""
        snake_case = self.get_image_processor()
        snake_case = self.get_tokenizer()

        snake_case = AlignProcessor(tokenizer=lowerCAmelCase , image_processor=lowerCAmelCase )

        snake_case = 'lower newer'
        snake_case = self.prepare_image_inputs()

        snake_case = processor(text=lowerCAmelCase , images=lowerCAmelCase )

        self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] )

        # test if it raises when no input is passed
        with pytest.raises(lowerCAmelCase ):
            processor()

    def snake_case ( self ):
        """processor.batch_decode delegates to tokenizer.batch_decode."""
        snake_case = self.get_image_processor()
        snake_case = self.get_tokenizer()

        snake_case = AlignProcessor(tokenizer=lowerCAmelCase , image_processor=lowerCAmelCase )

        snake_case = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        snake_case = processor.batch_decode(lowerCAmelCase )
        snake_case = tokenizer.batch_decode(lowerCAmelCase )

        self.assertListEqual(lowerCAmelCase , lowerCAmelCase )

    def snake_case ( self ):
        """The keys produced for combined input match model_input_names."""
        snake_case = self.get_image_processor()
        snake_case = self.get_tokenizer()

        snake_case = AlignProcessor(tokenizer=lowerCAmelCase , image_processor=lowerCAmelCase )

        snake_case = 'lower newer'
        snake_case = self.prepare_image_inputs()

        snake_case = processor(text=lowerCAmelCase , images=lowerCAmelCase )

        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 710 | """simple docstring"""
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int) -> None:
    """Monte Carlo estimate of pi: sample points uniformly in [-1, 1]^2 and
    use the fraction landing inside the unit circle (area ratio pi/4).

    Prints the estimate, the reference value and the absolute error.
    """

    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"""The estimated value of pi is {pi_estimate}""")
    print(f"""The numpy value of pi is {pi}""")
    print(f"""The total error is {abs(pi - pi_estimate )}""")
def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    """Monte Carlo estimate of the integral of *function_to_integrate* over
    [min_value, max_value]: the mean of f at uniform samples, scaled by the
    interval length."""
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)
def area_under_line_estimator_check(
    iterations: int, min_value: float = 0.0, max_value: float = 1.0
) -> None:
    """Check area_under_curve_estimator on f(x) = x against the exact integral
    (max^2 - min^2) / 2 and print the comparison."""

    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value
    )
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print('******************')
    print(f"""Estimating area under y=x where x varies from {min_value} to {max_value}""")
    print(f"""Estimated value is {estimated_value}""")
    print(f"""Expected value is {expected_value}""")
    print(f"""Total error is {abs(estimated_value - expected_value )}""")
    print('******************')
def pi_estimator_using_area_under_curve(iterations: int) -> None:
    """Estimate pi as the integral of sqrt(4 - x^2) over [0, 2] — a quarter
    circle of radius 2, whose area is exactly pi — and print the comparison."""

    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0
    )

    print('******************')
    print('Estimating pi using area_under_curve_estimator')
    print(f"""Estimated value is {estimated_value}""")
    print(f"""Expected value is {pi}""")
    print(f"""Total error is {abs(estimated_value - pi )}""")
    print('******************')
if __name__ == "__main__":
    # run the doctests embedded in this module
    import doctest

    doctest.testmod()
| 104 | 0 |
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class a ( unittest.TestCase ):
    """Tests for the transformers logging utilities: verbosity levels, the
    TRANSFORMERS_VERBOSITY env var, and advisory warnings.

    NOTE(review): this class looks machine-mangled — all methods share the
    name `UpperCAmelCase` (each definition shadows the previous, so only the
    last survives), locals are collapsed to `_A`, and later references
    (`logger`, `level_origin`, `msg`, `env_level_str`, ...) are unbound;
    the original identifiers must be restored before these tests can run.
    """
    def UpperCAmelCase ( self ) -> int:
        # set_verbosity_* must be reflected by both get_verbosity() and the
        # root library logger's effective level
        _A = logging.get_logger()

        # the current default level is logging.WARNING
        _A = logging.get_verbosity()

        logging.set_verbosity_error()
        self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )

        logging.set_verbosity_warning()
        self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )

        logging.set_verbosity_info()
        self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )

        logging.set_verbosity_debug()
        self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )

        # restore to the original level
        logging.set_verbosity(lowerCAmelCase_ )

    def UpperCAmelCase ( self ) -> Tuple:
        # warnings must be emitted or suppressed according to the verbosity
        _A = logging.get_verbosity()

        _A = logging.get_logger("""transformers.models.bart.tokenization_bart""" )
        _A = """Testing 1, 2, 3"""

        # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
        if level_origin <= logging.WARNING:
            with CaptureLogger(lowerCAmelCase_ ) as cl:
                logger.warning(lowerCAmelCase_ )
            self.assertEqual(cl.out , msg + """\n""" )

        # this is setting the level for all of `transformers.*` loggers
        logging.set_verbosity_error()

        # should not be able to log warnings
        with CaptureLogger(lowerCAmelCase_ ) as cl:
            logger.warning(lowerCAmelCase_ )
        self.assertEqual(cl.out , """""" )

        # should be able to log warnings again
        logging.set_verbosity_warning()
        with CaptureLogger(lowerCAmelCase_ ) as cl:
            logger.warning(lowerCAmelCase_ )
        self.assertEqual(cl.out , msg + """\n""" )

        # restore to the original level
        logging.set_verbosity(lowerCAmelCase_ )

    @mockenv(TRANSFORMERS_VERBOSITY="""error""" )
    def UpperCAmelCase ( self ) -> Optional[int]:
        # a valid TRANSFORMERS_VERBOSITY env value must set the library level
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        # this action activates the env var
        _A = logging.get_logger("""transformers.models.bart.tokenization_bart""" )

        _A = os.getenv("""TRANSFORMERS_VERBOSITY""" , lowerCAmelCase_ )
        _A = logging.log_levels[env_level_str]

        _A = logging.get_verbosity()
        self.assertEqual(
            lowerCAmelCase_ , lowerCAmelCase_ , F'''TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}''' , )

        # restore to the original level
        _A = """"""
        transformers.utils.logging._reset_library_root_logger()

    @mockenv(TRANSFORMERS_VERBOSITY="""super-error""" )
    def UpperCAmelCase ( self ) -> Any:
        # an invalid TRANSFORMERS_VERBOSITY value must be reported, not crash
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        _A = logging.logging.getLogger()
        with CaptureLogger(lowerCAmelCase_ ) as cl:
            # this action activates the env var
            logging.get_logger("""transformers.models.bart.tokenization_bart""" )
        self.assertIn("""Unknown option TRANSFORMERS_VERBOSITY=super-error""" , cl.out )

        # no need to restore as nothing was changed

    def UpperCAmelCase ( self ) -> List[str]:
        # testing `logger.warning_advice()`
        transformers.utils.logging._reset_library_root_logger()

        _A = logging.get_logger("""transformers.models.bart.tokenization_bart""" )
        _A = """Testing 1, 2, 3"""

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="""1""" ):
            # nothing should be logged as env var disables this method
            with CaptureLogger(lowerCAmelCase_ ) as cl:
                logger.warning_advice(lowerCAmelCase_ )
            self.assertEqual(cl.out , """""" )

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="""""" ):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(lowerCAmelCase_ ) as cl:
                logger.warning_advice(lowerCAmelCase_ )
            self.assertEqual(cl.out , msg + """\n""" )
def snake_case ( ) -> List[str]:
    """Toggle the global progress bars off and back on, checking the reported state each time."""
    disable_progress_bar()
    assert are_progress_bars_disabled()
    enable_progress_bar()
    assert not are_progress_bars_disabled()
| 401 | import math
def snake_case ( snake_case__ :int) -> list:
    """Return all primes below ``snake_case__`` via an odd-only sieve of Eratosthenes.

    The result always starts with 2 (the only even prime). For ``snake_case__ <= 2``
    the list ``[2]`` is still returned — the original unconditionally seeded 2 as well
    (but crashed with an IndexError for n < 3; the sieve is now padded to avoid that).
    """
    # pad to length >= 3 so indices 0..2 always exist
    is_prime = [True] * max(snake_case__ , 3)
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3 , int(snake_case__**0.5 + 1) , 2):
        if not is_prime[i]:
            # composites were already crossed out by a smaller prime factor
            continue
        # start at i*i (smaller multiples handled earlier) and step 2*i to skip even multiples
        for index in range(i * i , snake_case__ , 2 * i):
            is_prime[index] = False
    primes = [2]
    for i in range(3 , snake_case__ , 2):
        if is_prime[i]:
            primes.append(i)
    return primes
def snake_case ( snake_case__ :int = 999_966_663_333) -> int:
    """Project-Euler-style solver: sums numbers up to the limit divisible by exactly one of
    two consecutive prime squares (adds multiples of each square, then subtracts those
    divisible by both, which were counted twice).

    NOTE(review): obfuscation collapsed every local binding to ``_A``, so the names used
    below (``limit``, ``primes``, ``prime_index``, ``matches_sum``, ``last_prime``,
    ``next_prime``, ``lower_bound``, ``upper_bound``, ``current``) are never actually
    assigned, and ``prime_sieve`` is not defined under that name in this file (the sieve
    above is called ``snake_case``). Restore the original bindings before running.
    """
    _A = math.floor(math.sqrt(snake_case__)) + 100  # was presumably the sieve bound — TODO confirm
    _A = prime_sieve(snake_case__)  # was presumably `primes = prime_sieve(...)` — TODO confirm
    _A = 0  # was presumably `matches_sum = 0`
    _A = 0  # was presumably `prime_index = 0`
    _A = primes[prime_index]  # was presumably `last_prime = ...`
    while (last_prime**2) <= limit:
        _A = primes[prime_index + 1]  # was presumably `next_prime`
        _A = last_prime**2  # was presumably `lower_bound`
        _A = next_prime**2  # was presumably `upper_bound`
        # Get numbers divisible by lps(current)
        _A = lower_bound + last_prime  # was presumably `current`
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime
        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime
        # Add the numbers divisible by ups(current)
        _A = upper_bound - next_prime  # was presumably `current`
        while current > lower_bound:
            matches_sum += current
            current -= next_prime
        # Remove the numbers divisible by both ups and lps
        _A = 0  # was presumably `current = 0`
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime
        # Setup for next pair
        _A = next_prime  # was presumably `last_prime = next_prime`
        prime_index += 1
    return matches_sum
if __name__ == "__main__":
    # NOTE(review): no function named `solution` exists in this file — the solver above is
    # defined under the obfuscated name `snake_case`; confirm the intended entry point.
    print(solution())
| 401 | 1 |
"""simple docstring"""
import numpy as np
def _lowerCamelCase ( lowerCamelCase__ : np.ndarray ):
    """Element-wise hyperbolic tangent via the identity tanh(x) = 2 / (1 + e^(-2x)) - 1.

    Args:
        lowerCamelCase__: input array (any shape); applied element-wise.

    Returns:
        np.ndarray of the same shape with values in (-1, 1).
    """
    # the original body referenced an undefined name `vector`; use the parameter instead
    return (2 / (1 + np.exp(-2 * lowerCamelCase__ ))) - 1
if __name__ == "__main__":
    # run any doctest examples in this module when executed as a script
    import doctest
    doctest.testmod()
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
# Lazy import structure: heavy torch/TF modules are only imported on first attribute access.
# NOTE(review): obfuscation had collapsed `_import_structure` and its sub-lists into one
# repeatedly rebound name, leaving `_import_structure` undefined at the _LazyModule call
# below (NameError on import); the dict mutations are restored here.
_import_structure = {
    'configuration_groupvit': [
        'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'GroupViTConfig',
        'GroupViTOnnxConfig',
        'GroupViTTextConfig',
        'GroupViTVisionConfig',
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_groupvit'] = [
        'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'GroupViTModel',
        'GroupViTPreTrainedModel',
        'GroupViTTextModel',
        'GroupViTVisionModel',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_groupvit'] = [
        'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFGroupViTModel',
        'TFGroupViTPreTrainedModel',
        'TFGroupViTTextModel',
        'TFGroupViTVisionModel',
    ]

if TYPE_CHECKING:
    # static-analysis mirror of the lazy structure above
    from .configuration_groupvit import (
        GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        GroupViTConfig,
        GroupViTOnnxConfig,
        GroupViTTextConfig,
        GroupViTVisionConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_groupvit import (
            GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GroupViTModel,
            GroupViTPreTrainedModel,
            GroupViTTextModel,
            GroupViTVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_groupvit import (
            TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFGroupViTModel,
            TFGroupViTPreTrainedModel,
            TFGroupViTTextModel,
            TFGroupViTVisionModel,
        )

else:
    import sys

    __snake_case = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
# Substring-replacement rules applied to TF variable names to obtain HF parameter names.
# NOTE(review): obfuscation had collapsed all five constants into one rebound name, leaving
# INIT_COMMON / END_COMMON / DECODER_PATTERNS / REMAINING_PATTERNS / KEYS_TO_IGNORE (used
# below) undefined; the original names are restored here.
INIT_COMMON = [
    # tf -> hf
    ('/', '.'),
    ('layer_', 'layers.'),
    ('kernel', 'weight'),
    ('beta', 'bias'),
    ('gamma', 'weight'),
    ('pegasus', 'model'),
]
END_COMMON = [
    ('.output.dense', '.fc2'),
    ('intermediate.LayerNorm', 'final_layer_norm'),
    ('intermediate.dense', 'fc1'),
]
# Decoder weights: shared prefix rules + decoder attention rules + shared suffix rules.
DECODER_PATTERNS = (
    INIT_COMMON
    + [
        ('attention.self.LayerNorm', 'self_attn_layer_norm'),
        ('attention.output.dense', 'self_attn.out_proj'),
        ('attention.self', 'self_attn'),
        ('attention.encdec.LayerNorm', 'encoder_attn_layer_norm'),
        ('attention.encdec_output.dense', 'encoder_attn.out_proj'),
        ('attention.encdec', 'encoder_attn'),
        ('key', 'k_proj'),
        ('value', 'v_proj'),
        ('query', 'q_proj'),
        ('decoder.LayerNorm', 'decoder.layernorm_embedding'),
    ]
    + END_COMMON
)
# Everything that is not a decoder weight (embeddings + encoder).
REMAINING_PATTERNS = (
    INIT_COMMON
    + [
        ('embeddings.word_embeddings', 'shared.weight'),
        ('embeddings.position_embeddings', 'embed_positions.weight'),
        ('attention.self.LayerNorm', 'self_attn_layer_norm'),
        ('attention.output.dense', 'self_attn.output'),
        ('attention.self', 'self_attn.self'),
        ('encoder.LayerNorm', 'encoder.layernorm_embedding'),
    ]
    + END_COMMON
)
# TF bias variables with no counterpart in the HF checkpoint layout.
KEYS_TO_IGNORE = [
    'encdec/key/bias',
    'encdec/query/bias',
    'encdec/value/bias',
    'self/key/bias',
    'self/query/bias',
    'self/value/bias',
    'encdec_output/dense/bias',
    'attention/output/dense/bias',
]
def A_( k : str , patterns : list ):
    """Apply every (tf_name, hf_name) substring replacement in `patterns` to the key `k`.

    NOTE(review): the obfuscated signature `A_(A, A)` was a SyntaxError (duplicate
    parameter name) and the loop never used the patterns; both are fixed here.
    """
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name , hf_name)
    return k
def A_( tf_weights : dict , config_update : dict ):
    """Build a BigBirdPegasusForConditionalGeneration and fill it from TF numpy weights.

    NOTE(review): the obfuscated signature `A_(A, A)` was a SyntaxError (duplicate
    parameter) and every local was collapsed to one name; bindings are restored from the
    call sites. The rename helper is invoked as `rename_state_dict_key`, which in this
    file is defined under the obfuscated name `A_` above — confirm the intended name.
    """
    cfg = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(cfg)
    state_dict = torch_model.state_dict()
    mapping = {}
    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith('pegasus/decoder')}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith('pegasus/decoder')}
    for k, v in tqdm(decoder_weights.items() , 'tf -> hf conversion'):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k , patterns)
        if new_k not in state_dict:
            raise ValueError(f'''could not find new key {new_k} in state dict. (converted from {k})''')
        if any(True if i in k else False for i in ['dense', 'query', 'key', 'value']):
            # TF stores dense kernels transposed relative to torch Linear
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, f'''{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'''
    for k, v in tqdm(remaining_weights.items() , 'tf -> hf conversion'):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k , patterns)
        # the position-embedding table is remapped below, so it may be absent here
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f'''could not find new key {new_k} in state dict. (converted from {k})''')
        if any(True if i in k else False for i in ['dense', 'query', 'key', 'value']):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f'''{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'''
    # the single TF position-embedding table feeds both encoder and decoder
    mapping['model.encoder.embed_positions.weight'] = mapping['model.embed_positions.weight']
    mapping['model.decoder.embed_positions.weight'] = mapping.pop('model.embed_positions.weight')
    missing, extra = torch_model.load_state_dict(mapping , strict=False)
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            'final_logits_bias',
            'model.encoder.embed_tokens.weight',
            'model.decoder.embed_tokens.weight',
            'lm_head.weight',
        ]
    ]
    assert unexpected_missing == [], f'''no matches found for the following torch keys {unexpected_missing}'''
    assert extra == [], f'''no matches found for the following tf keys {extra}'''
    return torch_model
def A_( A : str ):
    """Load every variable of the TF checkpoint at path `A` into a {name: numpy array}
    dict, skipping optimizer bookkeeping variables (`global_step`).
    """
    init_vars = tf.train.list_variables(A)
    tf_weights = {}
    ignore_name = ['global_step']
    for name, shape in tqdm(init_vars , desc='converting tf checkpoint to dict'):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(A , name)
        tf_weights[name] = array
    return tf_weights
def A_( A : str , save_dir : str , config_update : dict ):
    """End-to-end conversion: read the TF checkpoint at `A`, build the PyTorch model,
    and save it to `save_dir`.

    NOTE(review): the obfuscated signature had three parameters all named `A`
    (a SyntaxError); the helpers called below are defined under the obfuscated
    name `A_` in this file — confirm the intended names.
    """
    tf_weights = get_tf_weights_as_numpy(A)
    torch_model = convert_bigbird_pegasus(tf_weights , config_update)
    torch_model.save_pretrained(save_dir)
if __name__ == "__main__":
    # NOTE(review): obfuscation collapsed `parser`, `args` and `config_update` into the
    # rebound name `lowerCAmelCase`, so the names used below are never assigned, and
    # `convert_bigbird_pegasus_ckpt_to_pytorch` is defined in this file under the
    # obfuscated name `A_`. Restore the original bindings before running.
    lowerCAmelCase : Tuple = argparse.ArgumentParser()
    parser.add_argument('--tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
    parser.add_argument('--save_dir', default=None, type=str, help='Path to the output PyTorch model.')
    lowerCAmelCase : Dict = parser.parse_args()
    lowerCAmelCase : Any = {}
    convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy import structure: vision/torch modules are only imported on first attribute access.
# NOTE(review): obfuscation had collapsed `_import_structure` and its sub-lists into one
# rebound name, leaving `_import_structure` undefined at the _LazyModule call below
# (NameError on import); the dict mutations are restored here.
_import_structure = {
    'configuration_vivit': ['VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VivitConfig'],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['image_processing_vivit'] = ['VivitImageProcessor']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_vivit'] = [
        'VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'VivitModel',
        'VivitPreTrainedModel',
        'VivitForVideoClassification',
    ]

if TYPE_CHECKING:
    # static-analysis mirror of the lazy structure above
    from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_vivit import VivitImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vivit import (
            VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            VivitForVideoClassification,
            VivitModel,
            VivitPreTrainedModel,
        )

else:
    import sys

    __SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 348 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class UpperCAmelCase ( unittest.TestCase ):
    '''Holds the hyper-parameters for the MobileViT image-processor tests and builds the
    kwargs dict used to construct the processor under test.

    NOTE(review): the test class below refers to this helper as
    `MobileViTImageProcessingTester`; the obfuscated class name `UpperCAmelCase` is kept
    here to avoid breaking other references. The original `__init__` had every parameter
    named `SCREAMING_SNAKE_CASE_` (a SyntaxError) and never set the instance attributes;
    both are restored here.
    '''

    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_center_crop=True , crop_size=None , do_flip_channel_order=True , ):
        # fall back to the defaults the MobileViT processor ships with
        size = size if size is not None else {'shortest_edge': 20}
        crop_size = crop_size if crop_size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def prepare_image_processor_dict( self ):
        '''Return the kwargs used to instantiate the image processor under test.'''
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_flip_channel_order": self.do_flip_channel_order,
        }
@require_torch
@require_vision
class UpperCAmelCase ( UpperCAmelCase__ , unittest.TestCase ):
    '''Tests MobileViTImageProcessor over PIL, numpy and torch inputs.

    NOTE(review): obfuscation damaged this class — the base mixin name `UpperCAmelCase__`
    is undefined, every method is named `UpperCamelCase` (so later defs shadow earlier
    ones), the class attribute should presumably be `image_processing_class`, and `setUp`
    never assigns `self.image_processor_tester` (the result is bound to a throwaway local).
    Restore the original names before relying on these tests.
    '''

    SCREAMING_SNAKE_CASE_ = MobileViTImageProcessor if is_vision_available() else None  # presumably `image_processing_class` — TODO confirm

    def UpperCamelCase( self ) -> Tuple:
        '''Set up the hyper-parameter helper (result should be bound to self.image_processor_tester).'''
        lowerCamelCase_ = MobileViTImageProcessingTester(self )

    @property
    def UpperCamelCase( self ) -> str:
        '''Kwargs dict used to construct the image processor under test.'''
        return self.image_processor_tester.prepare_image_processor_dict()

    def UpperCamelCase( self ) -> Optional[int]:
        '''The processor must expose all the documented configuration attributes.'''
        lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'do_resize' ) )
        self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'size' ) )
        self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'do_center_crop' ) )
        self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'center_crop' ) )
        self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'do_flip_channel_order' ) )

    def UpperCamelCase( self ) -> Tuple:
        '''from_dict must honour defaults and explicit size/crop_size overrides.'''
        lowerCamelCase_ = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'shortest_edge': 20} )
        self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} )
        lowerCamelCase_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {'shortest_edge': 42} )
        self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )

    def UpperCamelCase( self ) -> Optional[Any]:
        '''Intentionally empty placeholder (overrides a mixin test).'''
        pass

    def UpperCamelCase( self ) -> Union[str, Any]:
        '''Single and batched PIL images must come back center-cropped to the expected shape.'''
        lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        lowerCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE_ )
        for image in image_inputs:
            self.assertIsInstance(SCREAMING_SNAKE_CASE_ , Image.Image )
        # Test not batched input
        lowerCamelCase_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
        # Test batched
        lowerCamelCase_ = image_processing(SCREAMING_SNAKE_CASE_ , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )

    def UpperCamelCase( self ) -> Dict:
        '''Same as the PIL test but with numpy-array inputs.'''
        lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        lowerCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE_ , numpify=SCREAMING_SNAKE_CASE_ )
        for image in image_inputs:
            self.assertIsInstance(SCREAMING_SNAKE_CASE_ , np.ndarray )
        # Test not batched input
        lowerCamelCase_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
        # Test batched
        lowerCamelCase_ = image_processing(SCREAMING_SNAKE_CASE_ , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )

    def UpperCamelCase( self ) -> Tuple:
        '''Same as the PIL test but with torch-tensor inputs.'''
        lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        lowerCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE_ , torchify=SCREAMING_SNAKE_CASE_ )
        for image in image_inputs:
            self.assertIsInstance(SCREAMING_SNAKE_CASE_ , torch.Tensor )
        # Test not batched input
        lowerCamelCase_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
        # Test batched
        lowerCamelCase_ = image_processing(SCREAMING_SNAKE_CASE_ , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
| 384 |
'''simple docstring'''
import os
import sys
import unittest
# NOTE(review): obfuscation collapsed three distinct module constants into one rebound
# name `A_` — the repo root (referenced below as `git_repo_path`, undefined) and the two
# test-file paths (referenced by the test class as `SCREAMING_SNAKE_CASE_`, undefined).
# Only the last assignment (the BLIP path) survives; restore the original names.
A_ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import get_test_info  # noqa: E402
from get_test_info import (  # noqa: E402
    get_model_to_test_mapping,
    get_model_to_tester_mapping,
    get_test_to_tester_mapping,
)
A_ = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
A_ = os.path.join("tests", "models", "blip", "test_modeling_blip.py")
class UpperCAmelCase ( unittest.TestCase ):
    '''Checks get_test_info's model<->test<->tester mappings for BERT and BLIP.

    NOTE(review): obfuscation damaged this class — every method is named `UpperCamelCase`
    (later defs shadow earlier ones), all locals are bound to the same `lowerCamelCase_`
    name, and the test-file path constants are referenced as `SCREAMING_SNAKE_CASE_`,
    which is undefined (they were collapsed into `A_` above). Restore the original names
    before relying on these tests.
    '''

    def UpperCamelCase( self ) -> Dict:
        '''test -> tester-class mapping for the BERT and BLIP test files.'''
        lowerCamelCase_ = get_test_to_tester_mapping(SCREAMING_SNAKE_CASE_ )
        lowerCamelCase_ = get_test_to_tester_mapping(SCREAMING_SNAKE_CASE_ )
        lowerCamelCase_ = {'BertModelTest': 'BertModelTester'}
        lowerCamelCase_ = {
            'BlipModelTest': 'BlipModelTester',
            'BlipTextImageModelTest': 'BlipTextImageModelsModelTester',
            'BlipTextModelTest': 'BlipTextModelTester',
            'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester',
            'BlipVQAModelTest': 'BlipVQAModelTester',
            'BlipVisionModelTest': 'BlipVisionModelTester',
        }
        self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
        self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )

    def UpperCamelCase( self ) -> List[Any]:
        '''model -> test-class mapping for the BERT and BLIP test files.'''
        lowerCamelCase_ = get_model_to_test_mapping(SCREAMING_SNAKE_CASE_ )
        lowerCamelCase_ = get_model_to_test_mapping(SCREAMING_SNAKE_CASE_ )
        lowerCamelCase_ = {
            'BertForMaskedLM': ['BertModelTest'],
            'BertForMultipleChoice': ['BertModelTest'],
            'BertForNextSentencePrediction': ['BertModelTest'],
            'BertForPreTraining': ['BertModelTest'],
            'BertForQuestionAnswering': ['BertModelTest'],
            'BertForSequenceClassification': ['BertModelTest'],
            'BertForTokenClassification': ['BertModelTest'],
            'BertLMHeadModel': ['BertModelTest'],
            'BertModel': ['BertModelTest'],
        }
        lowerCamelCase_ = {
            'BlipForConditionalGeneration': ['BlipTextImageModelTest'],
            'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTest'],
            'BlipForQuestionAnswering': ['BlipVQAModelTest'],
            'BlipModel': ['BlipModelTest'],
            'BlipTextModel': ['BlipTextModelTest'],
            'BlipVisionModel': ['BlipVisionModelTest'],
        }
        self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
        self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )

    def UpperCamelCase( self ) -> List[Any]:
        '''model -> tester-class mapping for the BERT and BLIP test files.'''
        lowerCamelCase_ = get_model_to_tester_mapping(SCREAMING_SNAKE_CASE_ )
        lowerCamelCase_ = get_model_to_tester_mapping(SCREAMING_SNAKE_CASE_ )
        lowerCamelCase_ = {
            'BertForMaskedLM': ['BertModelTester'],
            'BertForMultipleChoice': ['BertModelTester'],
            'BertForNextSentencePrediction': ['BertModelTester'],
            'BertForPreTraining': ['BertModelTester'],
            'BertForQuestionAnswering': ['BertModelTester'],
            'BertForSequenceClassification': ['BertModelTester'],
            'BertForTokenClassification': ['BertModelTester'],
            'BertLMHeadModel': ['BertModelTester'],
            'BertModel': ['BertModelTester'],
        }
        lowerCamelCase_ = {
            'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'],
            'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'],
            'BlipForQuestionAnswering': ['BlipVQAModelTester'],
            'BlipModel': ['BlipModelTester'],
            'BlipTextModel': ['BlipTextModelTester'],
            'BlipVisionModel': ['BlipVisionModelTester'],
        }
        self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
        self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
| 384 | 1 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowercase_ ( __lowerCAmelCase ):
    '''Combines a ViT image processor and a CLIP tokenizer into one CLIPSeg-style
    processor. Accepts text and/or images and/or a visual prompt and returns the
    merged encoding.

    NOTE(review): the original had every class attribute, `__call__` parameter and
    method identically named (duplicate-parameter SyntaxError, method shadowing) —
    the ProcessorMixin attribute names and the upstream parameter/method names are
    restored here; confirm positional callers of `__call__` against the restored
    (text, visual_prompt, images, return_tensors) order.
    '''

    attributes = ['''image_processor''', '''tokenizer''']
    image_processor_class = '''ViTImageProcessor'''
    tokenizer_class = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')

    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        # `feature_extractor` is the deprecated spelling of `image_processor`
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' , FutureWarning , )
            feature_extractor = kwargs.pop('feature_extractor' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.' )
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.' )
        super().__init__(image_processor , tokenizer )

    def __call__( self , text=None , visual_prompt=None , images=None , return_tensors=None , **kwargs ):
        '''Tokenize `text` and/or preprocess `images`/`visual_prompt`, merging the results.

        Raises:
            ValueError: if nothing is provided, or if both text and a visual prompt are.
        '''
        if text is None and visual_prompt is None and images is None:
            raise ValueError('You have to specify either text, visual prompt or images.' )
        if text is not None and visual_prompt is not None:
            raise ValueError('You have to specify exactly one type of prompt. Either text or visual prompt.' )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if visual_prompt is not None and images is not None:
            encoding = {
                'pixel_values': image_features.pixel_values,
                'conditional_pixel_values': prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                'conditional_pixel_values': prompt_features.pixel_values,
            }
            return encoding
        else:
            # images only: wrap the raw image features
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )

    def batch_decode( self , *args , **kwargs ):
        '''Forward to the tokenizer's batch_decode.'''
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        '''Forward to the tokenizer's decode.'''
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def feature_extractor_class( self ):
        # deprecated alias kept for backward compatibility
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , FutureWarning , )
        return self.image_processor_class

    @property
    def feature_extractor( self ):
        # deprecated alias kept for backward compatibility
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , FutureWarning , )
        return self.image_processor
| 7 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowerCAmelCase__ ( lowerCAmelCase_ ):
    """Diffusion pipeline in the DiT style: a class-conditional transformer denoiser with a
    VAE decoder and classifier-free guidance.

    NOTE(review): obfuscation collapsed every local and attribute binding to `__lowercase`
    and gave `__init__` four identically named parameters (a SyntaxError), so names used
    below (`self.labels`, `latent_model_input`, `class_labels_input`, `eps`, `samples`,
    ...) are never actually assigned. Restore the original bindings before running.
    """

    def __init__( self : Tuple , lowercase__ : TransformeraDModel , lowercase__ : AutoencoderKL , lowercase__ : KarrasDiffusionSchedulers , lowercase__ : Optional[Dict[int, str]] = None , ):
        super().__init__()
        self.register_modules(transformer=lowercase__ , vae=lowercase__ , scheduler=lowercase__ )

        # create a imagenet -> id dictionary for easier use
        __lowercase : int = {}  # presumably `self.labels = {}` — TODO confirm
        if idalabel is not None:
            for key, value in idalabel.items():
                for label in value.split("," ):
                    __lowercase : List[str] = int(lowercase__ )  # presumably `self.labels[label] = int(key)`
        __lowercase : str = dict(sorted(self.labels.items() ) )

    def snake_case ( self : str , lowercase__ : Union[str, List[str]] ):
        """Map human-readable ImageNet label(s) to class id(s), validating membership."""
        if not isinstance(lowercase__ , lowercase__ ):
            __lowercase : Any = list(lowercase__ )

        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f'{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.' )

        return [self.labels[l] for l in label]

    @torch.no_grad()
    def __call__( self : List[str] , lowercase__ : List[int] , lowercase__ : float = 4.0 , lowercase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowercase__ : int = 5_0 , lowercase__ : Optional[str] = "pil" , lowercase__ : bool = True , ):
        """Generate images for the given class ids with classifier-free guidance."""
        __lowercase : Dict = len(lowercase__ )
        __lowercase : Tuple = self.transformer.config.sample_size
        __lowercase : str = self.transformer.config.in_channels

        # initial gaussian latents, duplicated when guidance is active (cond + uncond halves)
        __lowercase : int = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size) , generator=lowercase__ , device=self.device , dtype=self.transformer.dtype , )
        __lowercase : Tuple = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents

        # 1000 is the null (unconditional) class id used for the uncond half
        __lowercase : str = torch.tensor(lowercase__ , device=self.device ).reshape(-1 )
        __lowercase : Optional[int] = torch.tensor([1_0_0_0] * batch_size , device=self.device )
        __lowercase : Any = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels

        # set step values
        self.scheduler.set_timesteps(lowercase__ )
        for t in self.progress_bar(self.scheduler.timesteps ):
            if guidance_scale > 1:
                # keep the cond half and mirror it so both halves share identical latents
                __lowercase : List[Any] = latent_model_input[: len(lowercase__ ) // 2]
                __lowercase : Optional[Any] = torch.cat([half, half] , dim=0 )
            __lowercase : str = self.scheduler.scale_model_input(lowercase__ , lowercase__ )

            __lowercase : List[str] = t
            if not torch.is_tensor(lowercase__ ):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                __lowercase : List[str] = latent_model_input.device.type == "mps"
                if isinstance(lowercase__ , lowercase__ ):
                    __lowercase : Optional[int] = torch.floataa if is_mps else torch.floataa
                else:
                    __lowercase : Dict = torch.intaa if is_mps else torch.intaa
                __lowercase : List[Any] = torch.tensor([timesteps] , dtype=lowercase__ , device=latent_model_input.device )
            elif len(timesteps.shape ) == 0:
                __lowercase : str = timesteps[None].to(latent_model_input.device )
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            __lowercase : List[Any] = timesteps.expand(latent_model_input.shape[0] )
            # predict noise model_output
            __lowercase : Optional[Any] = self.transformer(
                lowercase__ , timestep=lowercase__ , class_labels=lowercase__ ).sample

            # perform guidance
            if guidance_scale > 1:
                __lowercase ,__lowercase : Any = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                __lowercase ,__lowercase : Optional[int] = torch.split(lowercase__ , len(lowercase__ ) // 2 , dim=0 )
                __lowercase : int = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                __lowercase : str = torch.cat([half_eps, half_eps] , dim=0 )
                __lowercase : Optional[Any] = torch.cat([eps, rest] , dim=1 )

            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                __lowercase ,__lowercase : int = torch.split(lowercase__ , lowercase__ , dim=1 )
            else:
                __lowercase : Optional[Any] = noise_pred

            # compute previous image: x_t -> x_t-1
            __lowercase : Optional[Any] = self.scheduler.step(lowercase__ , lowercase__ , lowercase__ ).prev_sample

        if guidance_scale > 1:
            __lowercase ,__lowercase : Tuple = latent_model_input.chunk(2 , dim=0 )
        else:
            __lowercase : Tuple = latent_model_input

        # decode latents back to image space through the VAE
        __lowercase : Any = 1 / self.vae.config.scaling_factor * latents
        __lowercase : Optional[int] = self.vae.decode(lowercase__ ).sample

        __lowercase : str = (samples / 2 + 0.5).clamp(0 , 1 )

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        __lowercase : List[str] = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()

        if output_type == "pil":
            __lowercase : int = self.numpy_to_pil(lowercase__ )

        if not return_dict:
            return (samples,)

        return ImagePipelineOutput(images=lowercase__ )
| 575 | 0 |
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class SCREAMING_SNAKE_CASE :
    """Builds Bit configs and dummy inputs, and runs shape checks for the model tests.

    NOTE(review): the obfuscated version named every method `SCREAMING_SNAKE_CASE_`
    (so only the last survived, and the internal `self.get_config()` call could never
    resolve) and never set the instance attributes; the upstream method names and
    attribute assignments are restored here. The list defaults mirror upstream and
    are only read, never mutated.
    """

    def __init__( self , parent , batch_size=3 , image_size=32 , num_channels=3 , embeddings_size=10 , hidden_sizes=[8, 16, 32, 64] , depths=[1, 1, 2, 1] , is_training=True , use_labels=True , hidden_act="relu" , num_labels=3 , scope=None , out_features=["stage2", "stage3", "stage4"] , out_indices=[2, 3, 4] , num_groups=1 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups

    def prepare_config_and_inputs( self ):
        """Random pixel values (and labels, when enabled) plus a matching config."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels

    def get_config( self ):
        return BitConfig(
            num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )

    def create_and_check_model( self , config , pixel_values , labels ):
        """The bare model's last hidden state must be downsampled by 32 in each spatial dim."""
        model = BitModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )

    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        """The classification head must produce (batch, num_labels) logits."""
        config.num_labels = self.num_labels
        model = BitForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def create_and_check_backbone( self , config , pixel_values , labels ):
        """The backbone must emit one feature map per requested stage, and fall back to the last stage when out_features is None."""
        model = BitBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )

        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )

        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , 1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )

        # verify channels
        self.parent.assertEqual(len(model.channels ) , 1 )
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )

    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE (UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
    """Common model/pipeline test suite for the Bit family (base, classifier, backbone)."""

    _UpperCamelCase : Union[str, Any] = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    _UpperCamelCase : Dict = (
        {'feature-extraction': BitModel, 'image-classification': BitForImageClassification}
        if is_torch_available()
        else {}
    )
    # Flags disabling common tests that do not apply to Bit.
    # NOTE(review): the original flag names were mangled away — confirm mapping.
    _UpperCamelCase : Dict = False
    _UpperCamelCase : Union[str, Any] = False
    _UpperCamelCase : Optional[Any] = False
    _UpperCamelCase : Union[str, Any] = False
    _UpperCamelCase : Any = False

    def SCREAMING_SNAKE_CASE_ ( self : Any )-> Optional[int]:
        """Instantiate the model tester and the config tester."""
        lowercase__ = BitModelTester(self )
        lowercase__ = ConfigTester(self , config_class=a , has_text_modality=a )

    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> List[str]:
        """Run the standard config serialization / round-trip checks."""
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def SCREAMING_SNAKE_CASE_ ( self : int )-> List[str]:
        """Intentionally a no-op placeholder for the common-properties hook."""
        return

    @unittest.skip(reason='Bit does not output attentions' )
    def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> Optional[Any]:
        """Skipped: Bit has no attention outputs."""
        pass

    @unittest.skip(reason='Bit does not use inputs_embeds' )
    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> Optional[int]:
        """Skipped: Bit is convolutional and only takes pixel values."""
        pass

    @unittest.skip(reason='Bit does not support input and output embeddings' )
    def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Dict:
        """Skipped: no token embedding tables to test."""
        pass

    def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Optional[Any]:
        """Ensure every model's forward signature starts with `pixel_values`."""
        lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowercase__ = model_class(a )
            lowercase__ = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowercase__ = [*signature.parameters.keys()]
            lowercase__ = ['pixel_values']
            self.assertListEqual(arg_names[:1] , a )

    def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Union[str, Any]:
        """Shape-check the base model."""
        lowercase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*a )

    def SCREAMING_SNAKE_CASE_ ( self : str )-> str:
        """Shape-check the backbone."""
        lowercase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*a )

    def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Union[str, Any]:
        """Verify norm layers initialise to weight=1 / bias=0."""
        lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowercase__ = model_class(config=a )
            for name, module in model.named_modules():
                if isinstance(a , (nn.BatchNormad, nn.GroupNorm) ):  # NOTE(review): `nn.BatchNormad` looks like mangled `nn.BatchNorm2d` — confirm
                    self.assertTrue(
                        torch.all(module.weight == 1 ) , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
                    self.assertTrue(
                        torch.all(module.bias == 0 ) , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )

    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> List[Any]:
        """Check hidden-state counts and spatial shapes for both residual layer types."""

        def check_hidden_states_output(a : int , a : Optional[Any] , a : Dict ):
            # Forward pass, then validate count and spatial size of hidden states.
            lowercase__ = model_class(a )
            model.to(a )
            model.eval()
            with torch.no_grad():
                lowercase__ = model(**self._prepare_for_class(a , a ) )
            lowercase__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            lowercase__ = self.model_tester.num_stages
            self.assertEqual(len(a ) , expected_num_stages + 1 )
            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )

        lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
        lowercase__ = ['preactivation', 'bottleneck']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                lowercase__ = layer_type
                lowercase__ = True
                check_hidden_states_output(a , a , a )
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                lowercase__ = True
                check_hidden_states_output(a , a , a )

    @unittest.skip(reason='Bit does not use feedforward chunking' )
    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> int:
        """Skipped: feed-forward chunking does not apply to conv nets."""
        pass

    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Dict:
        """Shape-check the image-classification head."""
        lowercase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*a )

    @slow
    def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> Optional[Any]:
        """Smoke-test loading the first published Bit checkpoint."""
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowercase__ = BitModel.from_pretrained(a )
            self.assertIsNotNone(a )
def __UpperCamelCase () -> Dict:
    """Load and return the standard COCO cats fixture image used by the slow tests.

    Fix: the mangled original assigned the opened image to a throwaway name and
    then returned the undefined name `image`, raising NameError.
    """
    return Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
    """Slow integration test: checks a real Bit checkpoint's logits on a fixture image."""

    @cached_property
    def SCREAMING_SNAKE_CASE_ ( self : Any )-> Dict:
        """Image processor for the first pretrained Bit checkpoint (None without vision)."""
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
        )

    @slow
    def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Dict:
        """Forward the fixture image through the pretrained model and compare logits."""
        lowercase__ = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(a )
        lowercase__ = self.default_image_processor
        lowercase__ = prepare_img()
        lowercase__ = image_processor(images=a , return_tensors='pt' ).to(a )
        # forward pass
        with torch.no_grad():
            lowercase__ = model(**a )
        # verify the logits
        lowercase__ = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape , a )
        lowercase__ = torch.tensor([[-0.6526, -0.5263, -1.4398]] ).to(a )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , a , atol=1E-4 ) )
@require_torch
class SCREAMING_SNAKE_CASE (UpperCAmelCase , unittest.TestCase ):
    """Backbone-tester-mixin run for BitBackbone."""

    _UpperCamelCase : List[str] = (BitBackbone,) if is_torch_available() else ()
    _UpperCamelCase : Any = BitConfig
    # NOTE(review): flag name mangled — presumably `has_attentions = False`.
    _UpperCamelCase : Optional[int] = False

    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> Union[str, Any]:
        """Create the shared model tester."""
        lowercase__ = BitModelTester(self )
| 45 |
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(
    fnc: Callable[[float], float],
    x_start: float,
    x_end: float,
    steps: int = 100,
) -> float:
    """Approximate the arc length of ``fnc`` between ``x_start`` and ``x_end``.

    The curve is approximated by ``steps`` straight-line segments; segment
    lengths are summed with ``math.hypot``. More steps give a better estimate.

    Fix: the mangled original used duplicate parameter names (a SyntaxError)
    and collapsed every local to one name, so ``xa``/``fxa`` were undefined.

    Args:
        fnc: the function describing the curve.
        x_start: left endpoint of the interval.
        x_end: right endpoint of the interval.
        steps: number of linear segments used in the approximation.

    Returns:
        The approximate length of the curve as a float.
    """
    xa = x_start
    fxa = fnc(x_start)
    length = 0.0
    step_size = (x_end - x_start) / steps  # loop-invariant increment
    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        xa_next = step_size + xa
        fxa_next = fnc(xa_next)
        length += math.hypot(xa_next - xa, fxa_next - fxa)
        # Increment step
        xa = xa_next
        fxa = fxa_next
    return length


# Backwards-compatible alias for the original (mangled) public name.
__UpperCamelCase = line_length
if __name__ == "__main__":

    def f(x: float) -> float:
        """Example curve under test: f(x) = sin(10 * x)."""
        return math.sin(10 * x)

    print("f(x) = sin(10 * x)")
    print("The length of the curve from x = -10 to x = 10 is:")

    # Fix: the mangled original defined the curve with an unused parameter
    # (referencing an undefined `x`) and looped on an undefined `i`.
    i = 10
    while i <= 100_000:
        print(f"With {i} steps: {line_length(f, -10, 10, i)}")
        i *= 10
| 45 | 1 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class lowerCamelCase ( metaclass=__SCREAMING_SNAKE_CASE ):
    """Placeholder object that raises a helpful error when torch/scipy are missing.

    NOTE(review): the metaclass name `__SCREAMING_SNAKE_CASE` is undefined in
    this chunk — in the original dummy-object modules it is `DummyObject`.
    """

    # Backends required before the real object can be used.
    __snake_case = ['''torch''', '''scipy''']

    def __init__( self : Any , *lowerCAmelCase_ : List[Any] , **lowerCAmelCase_ : Any ) -> Optional[Any]:
        """Fail fast with an informative message if torch/scipy are unavailable."""
        requires_backends(self , ["""torch""", """scipy"""] )

    @classmethod
    def lowercase__ ( cls : Dict , *lowerCAmelCase_ : List[str] , **lowerCAmelCase_ : Tuple ) -> Optional[int]:
        """Classmethod variant of the backend guard (presumably `from_config`)."""
        requires_backends(cls , ["""torch""", """scipy"""] )

    @classmethod
    def lowercase__ ( cls : int , *lowerCAmelCase_ : Union[str, Any] , **lowerCAmelCase_ : Tuple ) -> Dict:
        """Classmethod variant of the backend guard (presumably `from_pretrained`)."""
        requires_backends(cls , ["""torch""", """scipy"""] )
| 215 |
"""simple docstring"""
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
snake_case = '''platform'''
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def snake_case(
    config,
    input_ids,
    decoder_input_ids=None,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
) -> dict:
    """Build the inputs dict fed to the Flax Blenderbot test models.

    Any mask not supplied is derived from the ids (1 where the token differs
    from `config.pad_token_id`); head masks default to all-ones.

    Fix: the mangled original named every parameter `lowerCAmelCase_` while the
    body referenced `config`, `input_ids`, etc., so it raised NameError.

    Args:
        config: a config exposing `pad_token_id` and layer/head counts.
        input_ids: encoder token ids, shape (batch, src_len).
        decoder_input_ids: decoder token ids, shape (batch, tgt_len).
        attention_mask / decoder_attention_mask / *_head_mask: optional
            pre-computed masks; derived when None.

    Returns:
        Dict with input ids and attention masks for both encoder and decoder.
    """
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        # NOTE(review): the encoder mask is reused here and the computed
        # `decoder_attention_mask` / head masks go unused — this matches the
        # visible original, but confirm against the upstream test helper.
        "decoder_attention_mask": attention_mask,
    }
class UpperCAmelCase :
    """Builds tiny Blenderbot configs/inputs and checks Flax cached decoding.

    NOTE(review): `__init__` stores values via names that do not match its
    mangled parameters — confirm against the original FlaxBlenderbotModelTester.
    """

    def __init__( self : List[str] , __lowerCamelCase : str , __lowerCamelCase : List[Any]=1_3 , __lowerCamelCase : int=7 , __lowerCamelCase : Dict=True , __lowerCamelCase : List[str]=False , __lowerCamelCase : Optional[int]=9_9 , __lowerCamelCase : Any=1_6 , __lowerCamelCase : Tuple=2 , __lowerCamelCase : Tuple=4 , __lowerCamelCase : Optional[int]=4 , __lowerCamelCase : Tuple="gelu" , __lowerCamelCase : int=0.1 , __lowerCamelCase : Optional[int]=0.1 , __lowerCamelCase : List[str]=3_2 , __lowerCamelCase : Optional[int]=2 , __lowerCamelCase : Optional[Any]=1 , __lowerCamelCase : Dict=0 , __lowerCamelCase : List[str]=0.0_2 , ):
        """Record the miniature model hyper-parameters."""
        _snake_case = parent
        _snake_case = batch_size
        _snake_case = seq_length
        _snake_case = is_training
        _snake_case = use_labels
        _snake_case = vocab_size
        _snake_case = hidden_size
        _snake_case = num_hidden_layers
        _snake_case = num_attention_heads
        _snake_case = intermediate_size
        _snake_case = hidden_act
        _snake_case = hidden_dropout_prob
        _snake_case = attention_probs_dropout_prob
        _snake_case = max_position_embeddings
        _snake_case = eos_token_id
        _snake_case = pad_token_id
        _snake_case = bos_token_id
        _snake_case = initializer_range

    def __UpperCAmelCase ( self : Optional[int] ):
        """Create a config and matching inputs with EOS-terminated, shifted decoder ids."""
        _snake_case = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
        _snake_case = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
        _snake_case = shift_tokens_right(__lowerCamelCase , 1 , 2 )
        _snake_case = BlenderbotConfig(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=__lowerCamelCase , )
        _snake_case = prepare_blenderbot_inputs_dict(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
        return config, inputs_dict

    def __UpperCAmelCase ( self : Any ):
        """Alias used by the common test mixin."""
        _snake_case , _snake_case = self.prepare_config_and_inputs()
        return config, inputs_dict

    def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Any ):
        """Check that incremental decoding with a cache matches one-shot decoding."""
        _snake_case = 2_0
        _snake_case = model_class_name(__lowerCamelCase )
        _snake_case = model.encode(inputs_dict['''input_ids'''] )
        _snake_case , _snake_case = (
            inputs_dict['''decoder_input_ids'''],
            inputs_dict['''decoder_attention_mask'''],
        )
        _snake_case = model.init_cache(decoder_input_ids.shape[0] , __lowerCamelCase , __lowerCamelCase )
        _snake_case = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
        _snake_case = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        _snake_case = model.decode(
            decoder_input_ids[:, :-1] , __lowerCamelCase , decoder_attention_mask=__lowerCamelCase , past_key_values=__lowerCamelCase , decoder_position_ids=__lowerCamelCase , )
        _snake_case = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
        _snake_case = model.decode(
            decoder_input_ids[:, -1:] , __lowerCamelCase , decoder_attention_mask=__lowerCamelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=__lowerCamelCase , )
        _snake_case = model.decode(__lowerCamelCase , __lowerCamelCase )
        # The cached two-step decode must agree with the full decode.
        _snake_case = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1E-3 , msg=f"""Max diff is {diff}""" )

    def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : List[Any] , __lowerCamelCase : Any , __lowerCamelCase : str ):
        """Same cache check but with an explicitly padded decoder attention mask."""
        _snake_case = 2_0
        _snake_case = model_class_name(__lowerCamelCase )
        _snake_case = model.encode(inputs_dict['''input_ids'''] )
        _snake_case , _snake_case = (
            inputs_dict['''decoder_input_ids'''],
            inputs_dict['''decoder_attention_mask'''],
        )
        _snake_case = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
            ] , axis=-1 , )
        _snake_case = model.init_cache(decoder_input_ids.shape[0] , __lowerCamelCase , __lowerCamelCase )
        _snake_case = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        _snake_case = model.decode(
            decoder_input_ids[:, :-1] , __lowerCamelCase , decoder_attention_mask=__lowerCamelCase , past_key_values=__lowerCamelCase , decoder_position_ids=__lowerCamelCase , )
        _snake_case = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
        _snake_case = model.decode(
            decoder_input_ids[:, -1:] , __lowerCamelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=__lowerCamelCase , decoder_position_ids=__lowerCamelCase , )
        _snake_case = model.decode(__lowerCamelCase , __lowerCamelCase , decoder_attention_mask=__lowerCamelCase )
        _snake_case = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1E-3 , msg=f"""Max diff is {diff}""" )
@require_flax
class UpperCAmelCase ( unittest.TestCase ):
    """Standalone head/shift tests that run tiny randomly-initialised Blenderbot models."""

    A__ : Dict = 99

    def __UpperCAmelCase ( self : List[str] ):
        """Build a fixed EOS-terminated batch (one padded row) and a tiny config."""
        _snake_case = np.array(
            [
                [7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2],
                [6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2],
                [5, 9_7, 1_7, 3_9, 9_4, 4_0, 2],
                [7_6, 8_3, 9_4, 2_5, 7_0, 7_8, 2],
                [8_7, 5_9, 4_1, 3_5, 4_8, 6_6, 2],
                [5_5, 1_3, 1_6, 5_8, 5, 2, 1], # note padding
                [6_4, 2_7, 3_1, 5_1, 1_2, 7_5, 2],
                [5_2, 6_4, 8_6, 1_7, 8_3, 3_9, 2],
                [4_8, 6_1, 9, 2_4, 7_1, 8_2, 2],
                [2_6, 1, 6_0, 4_8, 2_2, 1_3, 2],
                [2_1, 5, 6_2, 2_8, 1_4, 7_6, 2],
                [4_5, 9_8, 3_7, 8_6, 5_9, 4_8, 2],
                [7_0, 7_0, 5_0, 9, 2_8, 0, 2],
            ] , dtype=np.intaa , )
        _snake_case = input_ids.shape[0]
        _snake_case = BlenderbotConfig(
            vocab_size=self.vocab_size , d_model=2_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=3_2 , decoder_ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
        return config, input_ids, batch_size

    def __UpperCAmelCase ( self : Union[str, Any] ):
        """LM head must return logits of shape (batch, seq, vocab)."""
        _snake_case , _snake_case , _snake_case = self._get_config_and_data()
        _snake_case = FlaxBlenderbotForConditionalGeneration(__lowerCamelCase )
        _snake_case = lm_model(input_ids=__lowerCamelCase )
        _snake_case = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs['''logits'''].shape , __lowerCamelCase )

    def __UpperCAmelCase ( self : Union[str, Any] ):
        """Logits follow the decoder length when encoder/decoder lengths differ."""
        _snake_case = BlenderbotConfig(
            vocab_size=self.vocab_size , d_model=1_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=4_8 , )
        _snake_case = FlaxBlenderbotForConditionalGeneration(__lowerCamelCase )
        _snake_case = np.array([[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2], [6_8, 3_4, 2_6, 5_8, 3_0, 2, 1]] , dtype=np.intaa )
        _snake_case = np.array([[8_2, 7_1, 8_2, 1_8, 2], [5_8, 6_8, 2, 1, 1]] , dtype=np.intaa )
        _snake_case = lm_model(input_ids=__lowerCamelCase , decoder_input_ids=__lowerCamelCase )
        _snake_case = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs['''logits'''].shape , __lowerCamelCase )

    def __UpperCAmelCase ( self : Optional[Any] ):
        """shift_tokens_right must keep shape, reduce pads by one, and start rows with 2."""
        _snake_case = np.array([[7_1, 8_2, 1_8, 3_3, 2, 1, 1], [6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2]] , dtype=np.intaa )
        _snake_case = shift_tokens_right(__lowerCamelCase , 1 , 2 )
        _snake_case = np.equal(__lowerCamelCase , 1 ).astype(np.floataa ).sum()
        _snake_case = np.equal(__lowerCamelCase , 1 ).astype(np.floataa ).sum()
        self.assertEqual(shifted.shape , input_ids.shape )
        self.assertEqual(__lowerCamelCase , n_pad_before - 1 )
        self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE,unittest.TestCase,__SCREAMING_SNAKE_CASE ):
    """Common Flax model-test suite for Blenderbot.

    NOTE(review): the mixin base name `__SCREAMING_SNAKE_CASE` is undefined in
    this chunk — originally FlaxModelTesterMixin / FlaxGenerationTesterMixin.
    """

    A__ : Union[str, Any] = True
    A__ : int = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    A__ : List[Any] = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()

    def __UpperCAmelCase ( self : List[str] ):
        """Create the shared model tester."""
        _snake_case = FlaxBlenderbotModelTester(self )

    def __UpperCAmelCase ( self : List[Any] ):
        """Cached decoding matches full decoding for every model class."""
        _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )

    def __UpperCAmelCase ( self : Optional[int] ):
        """Cached decoding with an explicit attention mask matches full decoding."""
        _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )

    def __UpperCAmelCase ( self : Any ):
        """`encode` produces identical shapes with and without jax.jit."""
        _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                _snake_case = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase )
                _snake_case = model_class(__lowerCamelCase )

                @jax.jit
                def encode_jitted(__lowerCamelCase : int , __lowerCamelCase : Optional[int]=None , **__lowerCamelCase : Union[str, Any] ):
                    return model.encode(input_ids=__lowerCamelCase , attention_mask=__lowerCamelCase )

                with self.subTest('''JIT Enabled''' ):
                    _snake_case = encode_jitted(**__lowerCamelCase ).to_tuple()
                with self.subTest('''JIT Disabled''' ):
                    with jax.disable_jit():
                        _snake_case = encode_jitted(**__lowerCamelCase ).to_tuple()
                self.assertEqual(len(__lowerCamelCase ) , len(__lowerCamelCase ) )
                for jitted_output, output in zip(__lowerCamelCase , __lowerCamelCase ):
                    self.assertEqual(jitted_output.shape , output.shape )

    def __UpperCAmelCase ( self : Optional[Any] ):
        """`decode` produces identical shapes with and without jax.jit."""
        _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                _snake_case = model_class(__lowerCamelCase )
                _snake_case = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] )
                _snake_case = {
                    '''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
                    '''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
                    '''encoder_outputs''': encoder_outputs,
                }

                @jax.jit
                def decode_jitted(__lowerCamelCase : Optional[int] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[int] ):
                    return model.decode(
                        decoder_input_ids=__lowerCamelCase , decoder_attention_mask=__lowerCamelCase , encoder_outputs=__lowerCamelCase , )

                with self.subTest('''JIT Enabled''' ):
                    _snake_case = decode_jitted(**__lowerCamelCase ).to_tuple()
                with self.subTest('''JIT Disabled''' ):
                    with jax.disable_jit():
                        _snake_case = decode_jitted(**__lowerCamelCase ).to_tuple()
                self.assertEqual(len(__lowerCamelCase ) , len(__lowerCamelCase ) )
                for jitted_output, output in zip(__lowerCamelCase , __lowerCamelCase ):
                    self.assertEqual(jitted_output.shape , output.shape )

    @slow
    def __UpperCAmelCase ( self : Dict ):
        """Smoke-test loading the published distilled checkpoint."""
        for model_class_name in self.all_model_classes:
            _snake_case = model_class_name.from_pretrained('''facebook/blenderbot-400M-distill''' )
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            _snake_case = np.ones((1, 1) ) * model.config.eos_token_id
            _snake_case = model(__lowerCamelCase )
            self.assertIsNotNone(__lowerCamelCase )

    @unittest.skipUnless(jax_device != '''cpu''' , '''3B test too slow on CPU.''' )
    @slow
    def __UpperCAmelCase ( self : List[str] ):
        """Full generation check against a reference reply from the 3B model."""
        _snake_case = {'''num_beams''': 1, '''early_stopping''': True, '''min_length''': 1_5, '''max_length''': 2_5}
        _snake_case = {'''skip_special_tokens''': True, '''clean_up_tokenization_spaces''': True}
        _snake_case = FlaxBlenderbotForConditionalGeneration.from_pretrained('''facebook/blenderbot-3B''' , from_pt=__lowerCamelCase )
        _snake_case = BlenderbotTokenizer.from_pretrained('''facebook/blenderbot-3B''' )
        _snake_case = ['''Sam''']
        _snake_case = tokenizer(__lowerCamelCase , return_tensors='''jax''' )
        _snake_case = model.generate(**__lowerCamelCase , **__lowerCamelCase )
        _snake_case = '''Sam is a great name. It means "sun" in Gaelic.'''
        _snake_case = tokenizer.batch_decode(__lowerCamelCase , **__lowerCamelCase )
        assert generated_txt[0].strip() == tgt_text
| 103 | 0 |
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = """T5Config"""
def __SCREAMING_SNAKE_CASE ( input_ids: jnp.ndarray , pad_token_id: int , decoder_start_token_id: int ) -> jnp.ndarray:
    """Shift `input_ids` one position to the right for teacher-forced decoding.

    Position 0 becomes `decoder_start_token_id`, and any label of -100 (the
    loss "ignore" index) is replaced with `pad_token_id`.

    Fix: the mangled original declared three identically-named parameters (a
    SyntaxError) while the body referenced the undeclared real names.

    Args:
        input_ids: token ids of shape (batch, seq_len).
        pad_token_id: id substituted for -100 label positions.
        decoder_start_token_id: id placed at position 0 of every row.

    Returns:
        The shifted ids, same shape as `input_ids`.
    """
    shifted_input_ids = jnp.zeros_like(input_ids)
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)
    # -100 marks ignored label positions; map them back to real padding.
    shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
    return shifted_input_ids


# Importable alias (the mangled name is underscore-private).
shift_tokens_right = __SCREAMING_SNAKE_CASE
class A__ ( lowerCAmelCase__ ):
    # NOTE(review): mangled MT5 Flax class (presumably FlaxMT5Model). Both
    # attributes share one name, so the second assignment overwrites the
    # first; originals are `model_type = "mt5"` and `config_class = MTaConfig`.
    lowerCAmelCase__ : Union[str, Any] = "mt5"
    lowerCAmelCase__ : List[str] = MTaConfig
class A__ ( lowerCAmelCase__ ):
    # NOTE(review): mangled MT5 Flax class (presumably FlaxMT5EncoderModel);
    # the duplicated attribute name means the second assignment clobbers the
    # first — originals are `model_type` and `config_class`.
    lowerCAmelCase__ : Optional[Any] = "mt5"
    lowerCAmelCase__ : Optional[Any] = MTaConfig
class A__ ( lowerCAmelCase__ ):
    # NOTE(review): mangled MT5 Flax class (presumably
    # FlaxMT5ForConditionalGeneration); duplicated attribute name clobbers the
    # first assignment — originals are `model_type` and `config_class`.
    lowerCAmelCase__ : Dict = "mt5"
    lowerCAmelCase__ : List[Any] = MTaConfig
| 688 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
# Lazy-import table: configuration is always importable; modeling classes only
# when torch is installed.
# NOTE(review): the mangled name `SCREAMING_SNAKE_CASE__` is reused for the
# dict, the list and the lazy module below — in the original these are
# `_import_structure["..."]` assignments and `sys.modules[__name__]`.
SCREAMING_SNAKE_CASE__ = {
    """configuration_falcon""": ["""FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FalconConfig"""],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # torch is present: expose the modeling entry points as well.
    SCREAMING_SNAKE_CASE__ = [
        """FALCON_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """FalconForCausalLM""",
        """FalconModel""",
        """FalconPreTrainedModel""",
        """FalconForSequenceClassification""",
        """FalconForTokenClassification""",
        """FalconForQuestionAnswering""",
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers only.
    from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_falcon import (
            FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
            FalconForCausalLM,
            FalconForQuestionAnswering,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconModel,
            FalconPreTrainedModel,
        )

else:
    import sys

    # NOTE(review): `_import_structure` is undefined in this mangled copy, and
    # the original assigns the lazy module into `sys.modules[__name__]`.
    SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 688 | 1 |
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
# NOTE(review): `git_repo_path` on the next-but-one line is undefined in this
# mangled copy — in the original conftest the first assignment binds that name.
UpperCamelCase_ = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def lowercase__( __UpperCamelCase: Any ):
"""simple docstring"""
config.addinivalue_line(
'markers' ,'is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested' )
config.addinivalue_line(
'markers' ,'is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested' )
config.addinivalue_line('markers' ,'is_pipeline_test: mark test to run only when pipelines are tested' )
config.addinivalue_line('markers' ,'is_staging_test: mark test to run only in the staging environment' )
config.addinivalue_line('markers' ,'accelerate_tests: mark test that require accelerate' )
config.addinivalue_line('markers' ,'tool_tests: mark the tool tests that are run on their specific schedule' )
def lowercase__( __UpperCamelCase: List[str] ):
    """Forward pytest's addoption hook to the shared transformers implementation."""
    from transformers.testing_utils import pytest_addoption_shared as _shared_addoption

    _shared_addoption(__UpperCamelCase )
def lowercase__( __UpperCamelCase: Optional[int] ):
"""simple docstring"""
from transformers.testing_utils import pytest_terminal_summary_main
SCREAMING_SNAKE_CASE : str = terminalreporter.config.getoption('--make-reports' )
if make_reports:
pytest_terminal_summary_main(__UpperCamelCase ,id=__UpperCamelCase )
def lowercase__( session , exitstatus ):
    """pytest_sessionfinish: map exit status 5 ("no tests collected") to success.

    Fix: the mangled original declared two identically-named parameters (a
    SyntaxError) and assigned 0 to a throwaway local instead of mutating
    `session.exitstatus`, so the remapping never happened.

    Args:
        session: the pytest session object whose `exitstatus` is rewritten.
        exitstatus: the status pytest is about to exit with.
    """
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
UpperCamelCase_ = doctest.register_optionflag("IGNORE_RESULT")
# NOTE(review): this rebinds the same mangled name and clobbers the flag
# above — in the original these are two distinct names (the flag constant and
# a reference to doctest's OutputChecker).
UpperCamelCase_ = doctest.OutputChecker
class _a ( SCREAMING_SNAKE_CASE ):
    """Doctest output checker that treats IGNORE_RESULT-flagged examples as passing.

    NOTE(review): the base name `SCREAMING_SNAKE_CASE` is undefined in this
    chunk; the original subclasses `doctest.OutputChecker`. The method body
    also references `IGNORE_RESULT`/`optionflags`/`OutputChecker` names that
    do not match its mangled parameters — confirm against the original.
    """

    def UpperCamelCase_ ( self, A, A, A ):
        """Return True unconditionally when the IGNORE_RESULT flag is set."""
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, A, A, A )
# Install the doctest customisations. NOTE(review): `CustomOutputChecker` is
# undefined in this mangled copy (the class above is named `_a`), and the
# three assignments all collide on one name — in the original conftest they
# rebind doctest's OutputChecker and pytest's doctest module/parser.
UpperCamelCase_ = CustomOutputChecker
UpperCamelCase_ = HfDoctestModule
UpperCamelCase_ = HfDocTestParser
| 28 |
snake_case_ : List[Any] = "Alexander Joslin"
import operator as op
from .stack import Stack
def __a ( __UpperCAmelCase : str ) -> int:
    """Evaluate a fully-parenthesised infix expression with Dijkstra's two-stack algorithm.

    Fix: the mangled original assigned every local to the single name
    `lowerCamelCase_` while the body referenced `operand_stack`,
    `operator_stack`, `operators` and `opr`, so it raised NameError.

    Args:
        __UpperCAmelCase: expression such as "(5 + ((4 * 2) * (2 + 3)))".
            Each digit character is pushed individually, so only
            single-digit operands are supported.

    Returns:
        The value left on top of the operand stack.
    """
    # Operator token -> binary function.
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
    operand_stack: Stack[int] = Stack()
    operator_stack: Stack[str] = Stack()

    for token in __UpperCAmelCase:
        if token.isdigit():
            # RULE 1: operands go on the operand stack.
            operand_stack.push(int(token))
        elif token in operators:
            # RULE 2: operators go on the operator stack.
            operator_stack.push(token)
        elif token == ")":
            # RULE 4: on ')' pop one operator and two operands, apply, push result.
            opr = operator_stack.peek()
            operator_stack.pop()
            number_right = operand_stack.peek()
            operand_stack.pop()
            number_left = operand_stack.peek()
            operand_stack.pop()
            result = operators[opr](number_left, number_right)
            operand_stack.push(result)

    # RULE 5: the final value on the operand stack is the answer.
    return operand_stack.peek()
if __name__ == "__main__":
    # Fix: the mangled original printed undefined names (`equation`,
    # `dijkstras_two_stack_algorithm`); the evaluator in this module is `__a`.
    equation = "(5 + ((4 * 2) * (2 + 3)))"
    # answer = 45
    print(f"{equation} = {__a(equation)}")
| 488 | 0 |
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
_lowerCAmelCase : List[Any] = logging.get_logger(__name__)
_lowerCAmelCase : List[Any] = {
"google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class __snake_case ( SCREAMING_SNAKE_CASE ):
    """UMT5 model configuration (mangled copy of `UMT5Config`).

    NOTE(review): `__init__` stores values via names that do not match its
    mangled parameters (all named `a_`) — confirm against the original.
    """

    SCREAMING_SNAKE_CASE__ = 'umt5'
    SCREAMING_SNAKE_CASE__ = ['past_key_values']

    def __init__( self ,a_=25_0112 ,a_=512 ,a_=64 ,a_=1024 ,a_=8 ,a_=None ,a_=6 ,a_=32 ,a_=128 ,a_=0.1 ,a_=1e-6 ,a_=1.0 ,a_="gated-gelu" ,a_=True ,a_=True ,a_="T5Tokenizer" ,a_=True ,a_=0 ,a_=1 ,a_=0 ,**a_ ,):
        """Store the hyper-parameters and derive the activation settings."""
        super().__init__(
            is_encoder_decoder=a_ ,tokenizer_class=a_ ,tie_word_embeddings=a_ ,pad_token_id=a_ ,eos_token_id=a_ ,decoder_start_token_id=a_ ,**a_ ,)
        lowerCAmelCase__ = vocab_size
        lowerCAmelCase__ = d_model
        lowerCAmelCase__ = d_kv
        lowerCAmelCase__ = d_ff
        lowerCAmelCase__ = num_layers
        lowerCAmelCase__ = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        ) # default = symmetry
        lowerCAmelCase__ = num_heads
        lowerCAmelCase__ = relative_attention_num_buckets
        lowerCAmelCase__ = relative_attention_max_distance
        lowerCAmelCase__ = dropout_rate
        lowerCAmelCase__ = layer_norm_epsilon
        lowerCAmelCase__ = initializer_factor
        lowerCAmelCase__ = feed_forward_proj
        lowerCAmelCase__ = use_cache
        # Split e.g. "gated-gelu" into ("gated", "gelu").
        lowerCAmelCase__ = self.feed_forward_proj.split('-' )
        lowerCAmelCase__ = act_info[-1]
        lowerCAmelCase__ = act_info[0] == 'gated'
        # NOTE(review): `and` binds tighter than `or`, so this reads
        # `(len > 1 and act != "gated") or len > 2` — confirm intended.
        if len(a_ ) > 1 and act_info[0] != "gated" or len(a_ ) > 2:
            raise ValueError(
                f'`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'
                'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
                '\'gated-gelu\' or \'relu\'' )
        # "gated-gelu" historically maps to the gelu_new activation kernel.
        if feed_forward_proj == "gated-gelu":
            lowerCAmelCase__ = 'gelu_new'

    @property
    def SCREAMING_SNAKE_CASE_ ( self ):
        """Hidden size (alias of `d_model`)."""
        return self.d_model

    @property
    def SCREAMING_SNAKE_CASE_ ( self ):
        """Number of attention heads (alias of `num_heads`)."""
        return self.num_heads

    @property
    def SCREAMING_SNAKE_CASE_ ( self ):
        """Number of hidden layers (alias of `num_layers`)."""
        return self.num_layers
class UMTaOnnxConfig(OnnxSeqaSeqConfigWithPast):
    """ONNX export configuration for UMT5.

    NOTE(review): restored from a machine-mangled original in which the dict-key
    assignments inside ``inputs`` were reduced to dead local bindings and all
    three properties shared one name. Structure follows the upstream
    T5OnnxConfig, which the original "Copied from" markers reference.
    """

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs(self):
        """Dynamic-axis names for each exported input tensor."""
        common_inputs = {
            'input_ids': {0: 'batch', 1: 'encoder_sequence'},
            'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
        }
        if self.use_past:
            # With a cache the encoder mask spans past + current tokens and the
            # decoder consumes a single new token per step.
            common_inputs['attention_mask'][1] = 'past_encoder_sequence + sequence'
            common_inputs['decoder_input_ids'] = {0: 'batch'}
            common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
        else:
            common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}
            common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'decoder_sequence'}
        if self.use_past:
            # Base-class helper adds the past_key_values input axes.
            self.fill_with_past_key_values_(common_inputs, direction='inputs')
        return common_inputs

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset(self):
        """Minimum ONNX opset required by the exported graph."""
        return 13

    @property
    def atol_for_validation(self):
        """Absolute tolerance used when validating the exported model."""
        return 5e-4


# Backward-compatible alias for the previous (mangled) class name.
__snake_case = UMTaOnnxConfig
| 604 |
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SwinvaModelTester:
    """Builds tiny Swinv2 configs and random inputs and runs shape checks for
    the bare model and its task heads.

    NOTE(review): restored from a machine-mangled original in which every
    ``__init__`` parameter was named ``a_`` (a SyntaxError) and every attribute
    assignment bound a throwaway local instead of ``self``. Names follow the
    sibling call sites in this file (e.g. ``SwinvaModelTester(self)``,
    ``prepare_config_and_inputs``) and upstream test conventions.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act='gelu',
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
    ):
        # NOTE(review): mutable list defaults (depths/num_heads) kept from the
        # original signature; they are only read here, never mutated.
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) with random tensors."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        """Build a small SwinvaConfig from the tester's hyper-parameters."""
        return SwinvaConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            # NOTE(review): 'path_norm' kwarg kept from the original source; it
            # looks like a typo for 'patch_norm' — confirm against SwinvaConfig.
            path_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Bare model: last hidden state must be (batch, seq_len, dim)."""
        model = SwinvaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # Each stage merges 4 tokens, so the final sequence length shrinks by
        # 4 per stage while the channel dim doubles per stage.
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        """MIM head: logits reconstruct the full image; also checks greyscale."""
        model = SwinvaForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size))
        # test greyscale images
        config.num_channels = 1
        model = SwinvaForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """Classification head: logits are (batch, num_labels)."""
        config.num_labels = self.type_sequence_label_size
        model = SwinvaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the format the common tests expect."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict


# Backward-compatible alias for the previous (mangled) class name.
__snake_case = SwinvaModelTester
@require_torch
class __snake_case ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Model-level test suite for Swinv2 (bare model, masked-image-modeling and
    image-classification heads).

    NOTE(review): identifiers in this block are machine-mangled — every class
    attribute is bound to the same name ``SCREAMING_SNAKE_CASE__`` (each binding
    shadows the previous one), every method is named ``SCREAMING_SNAKE_CASE_``
    (unittest can only discover the last one), locals are all
    ``lowerCAmelCase__`` (earlier results are clobbered), and one helper repeats
    the parameter name ``a_`` four times, which is a SyntaxError. The original
    identifiers must be restored before this suite can run; comments below
    describe the evident intent of each method.
    """

    # All model classes exercised by the generic tester machinery.
    SCREAMING_SNAKE_CASE__ = (
        (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
    )
    # Pipeline-task -> model-class mapping for the pipeline tests.
    SCREAMING_SNAKE_CASE__ = (
        {'feature-extraction': SwinvaModel, 'image-classification': SwinvaForImageClassification}
        if is_torch_available()
        else {}
    )
    # Feature flags consumed by the shared mixins (all disabled here).
    SCREAMING_SNAKE_CASE__ = False
    SCREAMING_SNAKE_CASE__ = False
    SCREAMING_SNAKE_CASE__ = False
    SCREAMING_SNAKE_CASE__ = False

    # setUp-style method: build the model tester and the config tester.
    def SCREAMING_SNAKE_CASE_ ( self ):
        """Create the Swinv2 model tester and a ConfigTester for the suite."""
        lowerCAmelCase__ = SwinvaModelTester(self )
        # NOTE(review): `a_` is not defined in this scope — the config class
        # argument was lost in the renaming (presumably SwinvaConfig).
        lowerCAmelCase__ = ConfigTester(self ,config_class=a_ ,embed_dim=37 )

    def SCREAMING_SNAKE_CASE_ ( self ):
        """Run the standard ConfigTester serialization/initialization checks."""
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def SCREAMING_SNAKE_CASE_ ( self ):
        """Forward-pass shape check for the bare SwinvaModel."""
        lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*a_ )

    @unittest.skip(reason='Got `CUDA error: misaligned address` with PyTorch 2.0.0.' )
    def SCREAMING_SNAKE_CASE_ ( self ):
        """Skipped: known PyTorch 2.0.0 CUDA failure."""
        pass

    @unittest.skip(reason='Swinv2 does not use inputs_embeds' )
    def SCREAMING_SNAKE_CASE_ ( self ):
        """Skipped: Swinv2 has no inputs_embeds path."""
        pass

    def SCREAMING_SNAKE_CASE_ ( self ):
        """Input embeddings must be an nn.Module; output embeddings None or Linear."""
        lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCAmelCase__ = model_class(a_ )
            self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
            lowerCAmelCase__ = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(a_ ,nn.Linear ) )

    def SCREAMING_SNAKE_CASE_ ( self ):
        """The forward signature's first argument must be `pixel_values`."""
        lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCAmelCase__ = model_class(a_ )
            lowerCAmelCase__ = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCAmelCase__ = [*signature.parameters.keys()]
            lowerCAmelCase__ = ['pixel_values']
            self.assertListEqual(arg_names[:1] ,a_ )

    def SCREAMING_SNAKE_CASE_ ( self ):
        """Validate attention outputs: count, window-sized shape, and ordering
        when hidden states are requested alongside attentions."""
        lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCAmelCase__ = True
        for model_class in self.all_model_classes:
            lowerCAmelCase__ = True
            lowerCAmelCase__ = False
            lowerCAmelCase__ = True
            lowerCAmelCase__ = model_class(a_ )
            model.to(a_ )
            model.eval()
            with torch.no_grad():
                lowerCAmelCase__ = model(**self._prepare_for_class(a_ ,a_ ) )
            lowerCAmelCase__ = outputs.attentions
            lowerCAmelCase__ = len(self.model_tester.depths )
            self.assertEqual(len(a_ ) ,a_ )
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            lowerCAmelCase__ = True
            lowerCAmelCase__ = config.window_size**2
            lowerCAmelCase__ = model_class(a_ )
            model.to(a_ )
            model.eval()
            with torch.no_grad():
                lowerCAmelCase__ = model(**self._prepare_for_class(a_ ,a_ ) )
            lowerCAmelCase__ = outputs.attentions
            self.assertEqual(len(a_ ) ,a_ )
            # Per-layer attention shape: (heads, window^2, window^2).
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,)
            lowerCAmelCase__ = len(a_ )
            # Check attention is always last and order is fine
            lowerCAmelCase__ = True
            lowerCAmelCase__ = True
            lowerCAmelCase__ = model_class(a_ )
            model.to(a_ )
            model.eval()
            with torch.no_grad():
                lowerCAmelCase__ = model(**self._prepare_for_class(a_ ,a_ ) )
            if hasattr(self.model_tester ,'num_hidden_states_types' ):
                lowerCAmelCase__ = self.model_tester.num_hidden_states_types
            else:
                # also another +1 for reshaped_hidden_states
                lowerCAmelCase__ = 2
            self.assertEqual(out_len + added_hidden_states ,len(a_ ) )
            lowerCAmelCase__ = outputs.attentions
            self.assertEqual(len(a_ ) ,a_ )
            self.assertListEqual(
                list(self_attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,)

    # Helper shared by the two hidden-state tests below.
    # NOTE(review): the four parameters are all named `a_` — a SyntaxError; the
    # originals were evidently (config, inputs_dict, model_class, image_size).
    def SCREAMING_SNAKE_CASE_ ( self ,a_ ,a_ ,a_ ,a_ ):
        """Check hidden_states / reshaped_hidden_states counts and shapes."""
        lowerCAmelCase__ = model_class(a_ )
        model.to(a_ )
        model.eval()
        with torch.no_grad():
            lowerCAmelCase__ = model(**self._prepare_for_class(a_ ,a_ ) )
        lowerCAmelCase__ = outputs.hidden_states
        lowerCAmelCase__ = getattr(
            self.model_tester ,'expected_num_hidden_layers' ,len(self.model_tester.depths ) + 1 )
        self.assertEqual(len(a_ ) ,a_ )
        # Swinv2 has a different seq_length
        lowerCAmelCase__ = (
            config.patch_size
            if isinstance(config.patch_size ,collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        lowerCAmelCase__ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
        lowerCAmelCase__ = outputs.reshaped_hidden_states
        self.assertEqual(len(a_ ) ,a_ )
        lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = reshaped_hidden_states[0].shape
        # Flatten the (B, C, H, W) reshaped states back to (B, H*W, C) so they
        # can be compared against the sequence-shaped hidden states.
        lowerCAmelCase__ = (
            reshaped_hidden_states[0].view(a_ ,a_ ,height * width ).permute(0 ,2 ,1 )
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)

    def SCREAMING_SNAKE_CASE_ ( self ):
        """hidden_states shapes with the default (unpadded) image size."""
        lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCAmelCase__ = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes:
            lowerCAmelCase__ = True
            self.check_hidden_states_output(a_ ,a_ ,a_ ,a_ )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowerCAmelCase__ = True
            self.check_hidden_states_output(a_ ,a_ ,a_ ,a_ )

    def SCREAMING_SNAKE_CASE_ ( self ):
        """hidden_states shapes when the input must be padded up to a multiple
        of the patch size."""
        lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCAmelCase__ = 3
        lowerCAmelCase__ = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        lowerCAmelCase__ = (
            config.patch_size
            if isinstance(config.patch_size ,collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        # Pad each spatial dim past the next multiple of the patch size.
        lowerCAmelCase__ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        lowerCAmelCase__ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes:
            lowerCAmelCase__ = True
            self.check_hidden_states_output(a_ ,a_ ,a_ ,(padded_height, padded_width) )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowerCAmelCase__ = True
            self.check_hidden_states_output(a_ ,a_ ,a_ ,(padded_height, padded_width) )

    def SCREAMING_SNAKE_CASE_ ( self ):
        """Masked-image-modeling head forward check."""
        lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*a_ )

    def SCREAMING_SNAKE_CASE_ ( self ):
        """Image-classification head forward check."""
        lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*a_ )

    @slow
    def SCREAMING_SNAKE_CASE_ ( self ):
        """Smoke-test loading the first published Swinv2 checkpoint."""
        for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCAmelCase__ = SwinvaModel.from_pretrained(a_ )
            self.assertIsNotNone(a_ )

    def SCREAMING_SNAKE_CASE_ ( self ):
        """With a zero-init config, every trainable non-embedding parameter
        must initialize to exactly 0.0 or 1.0."""
        lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCAmelCase__ = _config_zero_init(a_ )
        for model_class in self.all_model_classes:
            lowerCAmelCase__ = model_class(config=a_ )
            for name, param in model.named_parameters():
                if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
                    # Round to 9 decimal places to tolerate float noise.
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=f'Parameter {name} of model {model_class} seems not properly initialized' ,)
@require_vision
@require_torch
class __snake_case ( unittest.TestCase ):
    """Integration test: run a real Swinv2 classification checkpoint on a COCO
    fixture image and compare logits against reference values.

    NOTE(review): both methods are named ``SCREAMING_SNAKE_CASE_`` (machine
    mangled), so the second definition shadows the first; the originals were
    presumably ``default_image_processor`` and
    ``test_inference_image_classification_head``.
    """

    @cached_property
    def SCREAMING_SNAKE_CASE_ ( self ):
        """Image processor matching the checkpoint (None when vision extras are absent)."""
        return (
            AutoImageProcessor.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' )
            if is_vision_available()
            else None
        )

    @slow
    def SCREAMING_SNAKE_CASE_ ( self ):
        """End-to-end logits check against hard-coded reference values."""
        lowerCAmelCase__ = SwinvaForImageClassification.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' ).to(
            a_ )
        lowerCAmelCase__ = self.default_image_processor
        lowerCAmelCase__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
        lowerCAmelCase__ = image_processor(images=a_ ,return_tensors='pt' ).to(a_ )
        # forward pass
        with torch.no_grad():
            lowerCAmelCase__ = model(**a_ )
        # verify the logits
        lowerCAmelCase__ = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape ,a_ )
        # First three ImageNet logits from a known-good run of this checkpoint.
        lowerCAmelCase__ = torch.tensor([-0.3947, -0.4306, 0.0026] ).to(a_ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] ,a_ ,atol=1e-4 ) )
| 604 | 1 |
'''simple docstring'''
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
a__ : Optional[int] =logging.get_logger(__name__)
def normalize_box(box, width, height):
    """Scale an absolute pixel box ``[left, top, right, bottom]`` into the
    0-1000 coordinate space used by LayoutLM-style models.

    :param box: sequence of four pixel coordinates (left, top, right, bottom).
    :param width: image width in pixels (scales x coordinates).
    :param height: image height in pixels (scales y coordinates).
    :return: list of four ints in the range [0, 1000].
    """
    # NOTE(review): restored from a machine-mangled original whose three
    # parameters all shared one name (a SyntaxError); the call site below
    # (`normalize_box(...)`) grounds the restored function name.
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]


# Backward-compatible alias for the previous (mangled) name.
lowercase__ = normalize_box
def apply_tesseract(image, lang, tesseract_config):
    """Run Tesseract OCR on ``image`` and return ``(words, normalized_boxes)``,
    where the boxes are in the 0-1000 coordinate space of ``normalize_box``.

    :param image: image as a numpy array (converted to PIL for Tesseract).
    :param lang: optional Tesseract language code, or None.
    :param tesseract_config: extra Tesseract CLI flags, or None.
    """
    # NOTE(review): restored from a machine-mangled original whose parameters
    # all shared one name (a SyntaxError); the call site in the image-processor
    # class (`apply_tesseract(...)`) grounds the restored function name.
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type='dict', config=tesseract_config)
    words, left, top, width, height = data['text'], data['left'], data['top'], data['width'], data['height']

    # filter empty words and corresponding coordinates; a set makes the
    # membership tests below O(1) instead of O(n).
    irrelevant_indices = {idx for idx, word in enumerate(words) if not word.strip()}
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_boxes.append([x, y, x + w, y + h])

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes


# Backward-compatible alias for the previous (mangled) name.
lowercase__ = apply_tesseract
class snake_case ( __lowerCamelCase ):
    """Image processor for LayoutLM-style document models: resizes, rescales and
    normalizes pixel values and optionally runs Tesseract OCR to extract words
    plus 0-1000-normalized bounding boxes.

    NOTE(review): this block is machine-mangled — every parameter after ``self``
    is named ``__A`` (duplicate parameter names are a SyntaxError), locals are
    all ``__UpperCamelCase`` (each binding clobbers the previous one), and the
    four transform methods are all named ``_lowerCamelCase`` (the originals were
    evidently resize / rescale / normalize / preprocess). The original
    identifiers must be restored before this class can be imported.
    """

    # Output keys produced by preprocess (consumed by the base-class machinery).
    SCREAMING_SNAKE_CASE_ : List[str] =["pixel_values"]

    def __init__( self : int , __A : bool = True , __A : Dict[str, int] = None , __A : PILImageResampling = PILImageResampling.BILINEAR , __A : bool = True , __A : float = 1 / 2_5_5 , __A : bool = True , __A : Union[float, Iterable[float]] = None , __A : Union[float, Iterable[float]] = None , __A : bool = True , __A : Optional[str] = None , __A : Optional[str] = "" , **__A : Any , ):
        super().__init__(**__A )
        # Default target size is 224x224 when none is supplied.
        __UpperCamelCase = size if size is not None else {'height': 2_2_4, 'width': 2_2_4}
        __UpperCamelCase = get_size_dict(__A )
        __UpperCamelCase = do_resize
        __UpperCamelCase = size
        __UpperCamelCase = resample
        __UpperCamelCase = do_rescale
        # NOTE(review): `rescale_value` is not among the (mangled) parameters —
        # the original parameter name was lost; presumably the rescale factor.
        __UpperCamelCase = rescale_value
        __UpperCamelCase = do_normalize
        __UpperCamelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        __UpperCamelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
        __UpperCamelCase = apply_ocr
        __UpperCamelCase = ocr_lang
        __UpperCamelCase = tesseract_config

    # resize: scale an image to the requested {'height', 'width'} size.
    def _lowerCamelCase ( self : int , __A : np.ndarray , __A : Dict[str, int] , __A : PILImageResampling = PILImageResampling.BILINEAR , __A : Optional[Union[str, ChannelDimension]] = None , **__A : List[str] , ):
        __UpperCamelCase = get_size_dict(__A )
        if "height" not in size or "width" not in size:
            raise ValueError(f'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
        __UpperCamelCase = (size['height'], size['width'])
        return resize(__A , size=__A , resample=__A , data_format=__A , **__A )

    # rescale: multiply pixel values by a scalar factor (e.g. 1/255).
    def _lowerCamelCase ( self : Tuple , __A : np.ndarray , __A : Union[int, float] , __A : Optional[Union[str, ChannelDimension]] = None , **__A : List[Any] , ):
        return rescale(__A , scale=__A , data_format=__A , **__A )

    # normalize: subtract the per-channel mean and divide by the std.
    def _lowerCamelCase ( self : List[Any] , __A : np.ndarray , __A : Union[float, Iterable[float]] , __A : Union[float, Iterable[float]] , __A : Optional[Union[str, ChannelDimension]] = None , **__A : Any , ):
        return normalize(__A , mean=__A , std=__A , data_format=__A , **__A )

    # preprocess: full pipeline — optional OCR, resize, rescale, normalize,
    # channel-format conversion, and packing into a BatchFeature.
    def _lowerCamelCase ( self : str , __A : ImageInput , __A : bool = None , __A : Dict[str, int] = None , __A : Dict=None , __A : bool = None , __A : float = None , __A : bool = None , __A : Union[float, Iterable[float]] = None , __A : Union[float, Iterable[float]] = None , __A : bool = None , __A : Optional[str] = None , __A : Optional[str] = None , __A : Optional[Union[str, TensorType]] = None , __A : ChannelDimension = ChannelDimension.FIRST , **__A : Optional[int] , ):
        # Per-call arguments fall back to the instance-level defaults.
        __UpperCamelCase = do_resize if do_resize is not None else self.do_resize
        __UpperCamelCase = size if size is not None else self.size
        __UpperCamelCase = get_size_dict(__A )
        __UpperCamelCase = resample if resample is not None else self.resample
        __UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale
        __UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
        __UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize
        __UpperCamelCase = image_mean if image_mean is not None else self.image_mean
        __UpperCamelCase = image_std if image_std is not None else self.image_std
        __UpperCamelCase = apply_ocr if apply_ocr is not None else self.apply_ocr
        __UpperCamelCase = ocr_lang if ocr_lang is not None else self.ocr_lang
        __UpperCamelCase = tesseract_config if tesseract_config is not None else self.tesseract_config
        __UpperCamelCase = make_list_of_images(__A )
        if not valid_images(__A ):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.' )
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('If do_normalize is True, image_mean and image_std must be specified.' )
        # All transformations expect numpy arrays.
        __UpperCamelCase = [to_numpy_array(__A ) for image in images]
        # Tesseract OCR to get words + normalized bounding boxes
        if apply_ocr:
            requires_backends(self , 'pytesseract' )
            __UpperCamelCase = []
            __UpperCamelCase = []
            for image in images:
                __UpperCamelCase , __UpperCamelCase = apply_tesseract(__A , __A , __A )
                words_batch.append(__A )
                boxes_batch.append(__A )
        if do_resize:
            __UpperCamelCase = [self.resize(image=__A , size=__A , resample=__A ) for image in images]
        if do_rescale:
            __UpperCamelCase = [self.rescale(image=__A , scale=__A ) for image in images]
        if do_normalize:
            __UpperCamelCase = [self.normalize(image=__A , mean=__A , std=__A ) for image in images]
        __UpperCamelCase = [to_channel_dimension_format(__A , __A ) for image in images]
        __UpperCamelCase = BatchFeature(data={'pixel_values': images} , tensor_type=__A )
        # Attach the OCR results alongside the pixel values.
        if apply_ocr:
            __UpperCamelCase = words_batch
            __UpperCamelCase = boxes_batch
        return data
| 399 |
'''simple docstring'''
import math
def proth(number: int) -> int:
    """Return the ``number``-th Proth number (3, 5, 9, 13, 17, 25, ...).

    Proth numbers have the form ``k * 2**n + 1`` with odd ``k < 2**n``.

    :param number: 1-based index into the Proth sequence.
    :raises TypeError: if ``number`` is not an int.
    :raises ValueError: if ``number`` < 1.
    """
    # NOTE(review): restored from a machine-mangled original in which every
    # local shared one name and `proth_list`/`proth_index` were undefined; the
    # f-strings below (which read `number`) and the `proth(...)` call in the
    # __main__ block ground the restored names.
    if not isinstance(number, int):
        message = f'''Input value of [number={number}] must be an integer'''
        raise TypeError(message)
    if number < 1:
        message = f'''Input value of [number={number}] must be > 0'''
        raise ValueError(message)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # Proth numbers are generated block by block: block b contributes
        # values 2**(b+1) + <earlier entries>, and block sizes double.
        block_index = int(math.log(number // 3, 2)) + 2
        proth_list = [3, 5]
        proth_index = 2
        increment = 3
        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2
        return proth_list[number - 1]


# Backward-compatible alias for the previous (mangled) name.
lowercase__ = proth
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Print the first Proth numbers; index 0 is invalid and exercises the
    # ValueError branch below.
    for number in range(11):
        # NOTE(review): machine-mangled names — `a__` is rebound for both the
        # initial 0 and the result, while the final f-string reads `value`,
        # which is never defined here; the result variable was presumably
        # named `value` originally.
        a__ : int =0
        try:
            a__ : str =proth(number)
        except ValueError:
            print(f'ValueError: there is no {number}th Proth number')
            continue
        print(f'The {number}th Proth number: {value}')
| 399 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
# Module-level logger (referenced as `logger` by the tokenizer class below).
logger = logging.get_logger(__name__)

# SentencePiece's word-initial whitespace marker.
SPIECE_UNDERLINE = "▁"

# NOTE(review): in the mangled original all five constants were bound to the
# single name `lowerCAmelCase`, while the tokenizer class below references
# `logger`, `VOCAB_FILES_NAMES`, `PRETRAINED_VOCAB_FILES_MAP` and
# `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES` — names restored accordingly.

# File names the tokenizer saves/loads.
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}

# Hosted vocabulary files for each published checkpoint.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
    },
    "monolingual_vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
    },
}

# Maximum input length per checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1024}

# Backward-compatible alias: the mangled name's final binding was this dict.
lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
class a ( __lowercase ):
    """BARTpho (syllable-level) tokenizer backed by SentencePiece plus a reduced
    monolingual vocabulary file (``dict.txt``).

    NOTE(review): identifiers here are machine-mangled — the four class-level
    constants are all bound to ``SCREAMING_SNAKE_CASE__`` (each shadows the
    previous), every helper method is named ``snake_case_`` (only the last
    survives), and several signatures repeat the parameter name
    ``_lowerCAmelCase`` (a SyntaxError). Judging by PreTrainedTokenizer
    conventions the methods were build_inputs_with_special_tokens,
    get_special_tokens_mask, create_token_type_ids_from_sequences, vocab_size,
    get_vocab, _tokenize, _convert_token_to_id, _convert_id_to_token,
    convert_tokens_to_string and save_vocabulary; the original names must be
    restored before use.
    """

    SCREAMING_SNAKE_CASE__ : Any = VOCAB_FILES_NAMES
    SCREAMING_SNAKE_CASE__ : str = PRETRAINED_VOCAB_FILES_MAP
    SCREAMING_SNAKE_CASE__ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    SCREAMING_SNAKE_CASE__ : str = ['''input_ids''', '''attention_mask''']

    def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase="<s>" , _lowerCAmelCase="</s>" , _lowerCAmelCase="</s>" , _lowerCAmelCase="<s>" , _lowerCAmelCase="<unk>" , _lowerCAmelCase="<pad>" , _lowerCAmelCase="<mask>" , _lowerCAmelCase = None , **_lowerCAmelCase , ):
        """Load the SentencePiece model and build the reduced fairseq-style
        token<->id maps from the monolingual vocab file."""
        # The mask token behaves like a normal word, i.e. includes the space before it.
        __SCREAMING_SNAKE_CASE: int = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else mask_token
        __SCREAMING_SNAKE_CASE: Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , unk_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCAmelCase , )
        __SCREAMING_SNAKE_CASE: List[Any] = vocab_file
        __SCREAMING_SNAKE_CASE: Union[str, Any] = monolingual_vocab_file
        __SCREAMING_SNAKE_CASE: List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(_lowerCAmelCase ) )
        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        __SCREAMING_SNAKE_CASE: Dict = {}
        __SCREAMING_SNAKE_CASE: Optional[Any] = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(_lowerCAmelCase ) not in self.fairseq_tokens_to_ids:
                __SCREAMING_SNAKE_CASE: List[str] = cnt
                cnt += 1
        # Each dict.txt line is "<token> <count>"; only the token is used.
        with open(_lowerCAmelCase , '''r''' , encoding='''utf-8''' ) as f:
            for line in f.readlines():
                __SCREAMING_SNAKE_CASE: Any = line.strip().split()[0]
                __SCREAMING_SNAKE_CASE: Optional[int] = len(self.fairseq_tokens_to_ids )
                if str(_lowerCAmelCase ) not in self.fairseq_tokens_to_ids:
                    __SCREAMING_SNAKE_CASE: Union[str, Any] = len(self.fairseq_tokens_to_ids )
        __SCREAMING_SNAKE_CASE: Optional[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__( self ):
        """Pickle support: the SentencePiece processor is not picklable, so
        serialize its model proto instead."""
        __SCREAMING_SNAKE_CASE: int = self.__dict__.copy()
        __SCREAMING_SNAKE_CASE: List[str] = None
        __SCREAMING_SNAKE_CASE: Optional[int] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__( self , _lowerCAmelCase ):
        """Rebuild the SentencePiece processor from the serialized proto."""
        __SCREAMING_SNAKE_CASE: Tuple = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            __SCREAMING_SNAKE_CASE: int = {}
        __SCREAMING_SNAKE_CASE: List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )

    # build_inputs_with_special_tokens: <s> A </s> or <s> A </s></s> B </s>.
    def snake_case_ ( self , _lowerCAmelCase , _lowerCAmelCase = None ):
        """Add BOS/EOS special tokens around one or two sequences."""
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        __SCREAMING_SNAKE_CASE: Optional[Any] = [self.cls_token_id]
        __SCREAMING_SNAKE_CASE: List[Any] = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep

    # get_special_tokens_mask: 1 marks special tokens, 0 marks sequence tokens.
    def snake_case_ ( self , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = False ):
        """Return the special-token mask for sequences built by this tokenizer."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=_lowerCAmelCase , token_ids_a=_lowerCAmelCase , already_has_special_tokens=_lowerCAmelCase )
        if token_ids_a is None:
            return [1] + ([0] * len(_lowerCAmelCase )) + [1]
        return [1] + ([0] * len(_lowerCAmelCase )) + [1, 1] + ([0] * len(_lowerCAmelCase )) + [1]

    # create_token_type_ids_from_sequences: BARTpho uses all-zero segment ids.
    def snake_case_ ( self , _lowerCAmelCase , _lowerCAmelCase = None ):
        """Return an all-zero token-type-id list of the appropriate length."""
        __SCREAMING_SNAKE_CASE: str = [self.sep_token_id]
        __SCREAMING_SNAKE_CASE: List[Any] = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    @property
    def snake_case_ ( self ):
        """vocab_size: number of entries in the reduced fairseq vocab."""
        return len(self.fairseq_ids_to_tokens )

    def snake_case_ ( self ):
        """get_vocab: token -> id map including added tokens."""
        __SCREAMING_SNAKE_CASE: str = {self.convert_ids_to_tokens(_lowerCAmelCase ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def snake_case_ ( self , _lowerCAmelCase ):
        """_tokenize: delegate to SentencePiece."""
        return self.sp_model.encode(_lowerCAmelCase , out_type=_lowerCAmelCase )

    def snake_case_ ( self , _lowerCAmelCase ):
        """_convert_token_to_id: unknown tokens map to unk_token_id."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def snake_case_ ( self , _lowerCAmelCase ):
        """_convert_id_to_token via the reduced fairseq map."""
        return self.fairseq_ids_to_tokens[index]

    def snake_case_ ( self , _lowerCAmelCase ):
        """convert_tokens_to_string: join pieces and replace the SentencePiece
        underline with spaces.
        NOTE(review): the second `.replace` argument was mangled away — it was
        presumably SPIECE_UNDERLINE."""
        __SCREAMING_SNAKE_CASE: Any = ''''''.join(_lowerCAmelCase ).replace(_lowerCAmelCase , ''' ''' ).strip()
        return out_string

    def snake_case_ ( self , _lowerCAmelCase , _lowerCAmelCase = None ):
        """save_vocabulary: write the SentencePiece model and the monolingual
        dict into ``save_directory`` and return both paths."""
        if not os.path.isdir(_lowerCAmelCase ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        __SCREAMING_SNAKE_CASE: Optional[Any] = os.path.join(
            _lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        __SCREAMING_SNAKE_CASE: int = os.path.join(
            _lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''monolingual_vocab_file'''] , )
        # Copy the on-disk files if present; otherwise re-serialize from memory.
        if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , _lowerCAmelCase )
        elif not os.path.isfile(self.vocab_file ):
            with open(_lowerCAmelCase , '''wb''' ) as fi:
                __SCREAMING_SNAKE_CASE: Tuple = self.sp_model.serialized_model_proto()
                fi.write(_lowerCAmelCase )
        if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
            _lowerCAmelCase ) and os.path.isfile(self.monolingual_vocab_file ):
            copyfile(self.monolingual_vocab_file , _lowerCAmelCase )
        elif not os.path.isfile(self.monolingual_vocab_file ):
            with open(_lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f"""{str(_lowerCAmelCase )} \n""" )
        return out_vocab_file, out_monolingual_vocab_file
| 146 |
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    """Build an n-qubit Quantum Fourier Transform circuit, simulate it with
    10,000 shots on the qasm simulator, and return the measurement counts.

    :param number_of_qubits: circuit width; must be an exact integer in (0, 10].
    :raises TypeError: if a string is passed.
    :raises ValueError: for non-positive, non-integral, or too-large values.
    """
    # NOTE(review): restored from a machine-mangled original in which the type
    # check read `isinstance(x, x)` and the registers/circuit/counter were all
    # bound to one local name; the `quantum_fourier_transform(3)` call in the
    # __main__ block grounds the restored function name.
    if isinstance(number_of_qubits, str):
        raise TypeError('number of qubits must be a integer.')
    if number_of_qubits <= 0:
        raise ValueError('number of qubits must be > 0.')
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError('number of qubits must be exact integer.')
    if number_of_qubits > 10:
        raise ValueError('number of qubits too large to simulate(>10).')

    qr = QuantumRegister(number_of_qubits, 'qr')
    cr = ClassicalRegister(number_of_qubits, 'cr')
    quantum_circuit = QuantumCircuit(qr, cr)
    counter = number_of_qubits

    # QFT core: Hadamard on each qubit (highest first) followed by controlled
    # phase rotations of pi / 2^(distance) against the remaining qubits.
    for i in range(number_of_qubits):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    # Reverse the qubit order with swaps to obtain the standard QFT output.
    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend('qasm_simulator')
    job = execute(quantum_circuit, backend, shots=10_000)

    return job.result().get_counts(quantum_circuit)


# Backward-compatible alias for the previous (mangled) name.
lowerCAmelCase = quantum_fourier_transform
if __name__ == "__main__":
    # Demo: print the measurement counts for a 3-qubit QFT.
    # NOTE(review): this calls `quantum_fourier_transform`, while the
    # definition above was machine-renamed to `lowerCAmelCase`; the function
    # name must be restored for this to run.
    print(
        f'''Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}'''
    )
| 146 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCamelCase_ :
    # Test helper that stores hyper-parameters and builds (config, inputs)
    # tuples for the TFBlipText model tests below.
    # NOTE(review): this class is heavily mangled by identifier obfuscation -
    # every __init__ parameter is named `_A` (duplicate parameter names are a
    # SyntaxError) and every local is bound to `UpperCAmelCase__`, while the
    # right-hand sides still reference the intended names (parent,
    # batch_size, input_mask, ...). Restore from the upstream
    # BlipTextModelTester before relying on this code.
    def __init__( self : Optional[Any] , _A : List[Any] , _A : Tuple=12 , _A : Tuple=7 , _A : str=True , _A : List[Any]=True , _A : Any=True , _A : Optional[Any]=99 , _A : Tuple=32 , _A : Dict=32 , _A : Tuple=2 , _A : Any=4 , _A : Dict=37 , _A : int=0.1 , _A : List[str]=0.1 , _A : Optional[int]=512 , _A : int=0.0_2 , _A : List[Any]=0 , _A : Union[str, Any]=None , ):
        '''simple docstring'''
        # Stores every hyper-parameter on the instance (names on the right
        # are the intended parameter names; they are unbound as written).
        UpperCAmelCase__ : Optional[int] = parent
        UpperCAmelCase__ : Union[str, Any] = batch_size
        UpperCAmelCase__ : Tuple = seq_length
        UpperCAmelCase__ : Optional[int] = is_training
        UpperCAmelCase__ : Optional[int] = use_input_mask
        UpperCAmelCase__ : Tuple = use_labels
        UpperCAmelCase__ : Dict = vocab_size
        UpperCAmelCase__ : str = hidden_size
        UpperCAmelCase__ : List[str] = projection_dim
        UpperCAmelCase__ : Tuple = num_hidden_layers
        UpperCAmelCase__ : List[Any] = num_attention_heads
        UpperCAmelCase__ : List[Any] = intermediate_size
        UpperCAmelCase__ : List[Any] = dropout
        UpperCAmelCase__ : int = attention_dropout
        UpperCAmelCase__ : Optional[int] = max_position_embeddings
        UpperCAmelCase__ : List[Any] = initializer_range
        UpperCAmelCase__ : List[str] = scope
        UpperCAmelCase__ : Union[str, Any] = bos_token_id

    def lowercase_ ( self : int ):
        '''simple docstring'''
        # prepare_config_and_inputs: random input ids plus an optional
        # attention mask whose tail is zeroed from a random per-row index.
        UpperCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        UpperCAmelCase__ : Tuple = None
        if self.use_input_mask:
            UpperCAmelCase__ : str = random_attention_mask([self.batch_size, self.seq_length] )
        if input_mask is not None:
            # NOTE(review): `input_mask`, `seq_length` and `batch_size` are
            # free (unbound) names here - the assignments above bound only
            # the obfuscated name - so this branch raises NameError as
            # written; confirm against the upstream tester.
            UpperCAmelCase__ : Optional[int] = input_mask.numpy()
            UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = input_mask.shape
            UpperCAmelCase__ : Dict = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
            for batch_idx, start_index in enumerate(_A ):
                UpperCAmelCase__ : str = 1
                UpperCAmelCase__ : List[Any] = 0
        UpperCAmelCase__ : Optional[int] = self.get_config()
        return config, input_ids, tf.convert_to_tensor(_A )

    def lowercase_ ( self : Optional[int] ):
        '''simple docstring'''
        # get_config: builds a BlipTextConfig from the stored parameters.
        return BlipTextConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )

    def lowercase_ ( self : Optional[Any] , _A : int , _A : Dict , _A : List[Any] ):
        '''simple docstring'''
        # create_and_check_model: runs the TF model with and without the
        # attention mask and checks output shapes.
        # NOTE(review): duplicate `_A` parameters (SyntaxError); intended
        # signature is (config, input_ids, input_mask).
        UpperCAmelCase__ : str = TFBlipTextModel(config=_A )
        UpperCAmelCase__ : Optional[int] = model(_A , attention_mask=_A , training=_A )
        UpperCAmelCase__ : List[Any] = model(_A , training=_A )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )

    def lowercase_ ( self : Dict ):
        '''simple docstring'''
        # prepare_config_and_inputs_for_common: splits the prepared tuple
        # into (config, inputs_dict).
        UpperCAmelCase__ : Optional[int] = self.prepare_config_and_inputs()
        UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : int = config_and_inputs
        UpperCAmelCase__ : Tuple = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_tf
class lowerCamelCase_ ( __a , unittest.TestCase ):
    # TFBlipText model test case.
    # NOTE(review): the first base class `__a` is not defined in this module
    # (upstream this is TFModelTesterMixin, imported above); evaluating the
    # bases raises NameError at class-creation time. The four class
    # attributes below also all share one obfuscated name, so only the last
    # assignment survives. Confirm against the upstream test module.
    lowerCAmelCase__ = (TFBlipTextModel,) if is_tf_available() else ()
    lowerCAmelCase__ = False
    lowerCAmelCase__ = False
    lowerCAmelCase__ = False

    def lowercase_ ( self : List[Any] ):
        '''simple docstring'''
        # setUp-style initialiser: builds the model tester and config tester.
        # NOTE(review): `BlipTextModelTester` is not defined in this module
        # (the tester class above is obfuscated-named `lowerCamelCase_`),
        # and `_A` is a free name here; confirm before use.
        UpperCAmelCase__ : Optional[int] = BlipTextModelTester(self )
        UpperCAmelCase__ : str = ConfigTester(self , config_class=_A , hidden_size=37 )

    def lowercase_ ( self : List[str] ):
        '''simple docstring'''
        # Runs the shared configuration sanity tests.
        self.config_tester.run_common_tests()

    def lowercase_ ( self : int ):
        '''simple docstring'''
        # Builds config/inputs and runs the model-shape check.
        UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_A )

    def lowercase_ ( self : Any ):
        '''simple docstring'''
        pass

    def lowercase_ ( self : int ):
        '''simple docstring'''
        pass

    @unittest.skip(reason='''Blip does not use inputs_embeds''' )
    def lowercase_ ( self : str ):
        '''simple docstring'''
        pass

    @unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' )
    def lowercase_ ( self : List[Any] ):
        '''simple docstring'''
        pass

    @unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' )
    def lowercase_ ( self : Tuple ):
        '''simple docstring'''
        pass

    @slow
    def lowercase_ ( self : Optional[Any] ):
        '''simple docstring'''
        # Smoke-test loading the first pretrained checkpoint from the hub.
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCAmelCase__ : Any = TFBlipTextModel.from_pretrained(_A )
            self.assertIsNotNone(_A )

    def lowercase_ ( self : Any , _A : Tuple=True ):
        '''simple docstring'''
        # Delegates PT<->TF equivalence checking to the mixin.
        super().test_pt_tf_model_equivalence(allow_missing_keys=_A )
| 75 |
'''simple docstring'''
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
# BibTeX citation for the GLUE benchmark.
# NOTE(review): the three module-level constants below are all assigned to
# the same obfuscated name, so only the last assignment survives. Upstream
# these are _CITATION, _DESCRIPTION and _KWARGS_DESCRIPTION - the names the
# @add_start_docstrings decorator further below expects.
__UpperCamelCase : Optional[int] = """\
@inproceedings{wang2019glue,
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
note={In the Proceedings of ICLR.},
year={2019}
}
"""
# Human-readable description of the GLUE benchmark.
__UpperCamelCase : Tuple = """\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
"""
# Usage/args documentation for the metric, including doctest-style examples.
__UpperCamelCase : Optional[int] = """
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
predictions: list of predictions to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
Returns: depending on the GLUE subset, one or several of:
\"accuracy\": Accuracy
\"f1\": F1 score
\"pearson\": Pearson Correlation
\"spearmanr\": Spearman Correlation
\"matthews_correlation\": Matthew Correlation
Examples:
>>> glue_metric = datasets.load_metric('glue', 'sst2') # 'sst2' or any of [\"mnli\", \"mnli_mismatched\", \"mnli_matched\", \"qnli\", \"rte\", \"wnli\", \"hans\"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> glue_metric = datasets.load_metric('glue', 'mrpc') # 'mrpc' or 'qqp'
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> glue_metric = datasets.load_metric('glue', 'stsb')
>>> references = [0., 1., 2., 3., 4., 5.]
>>> predictions = [0., 1., 2., 3., 4., 5.]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print({\"pearson\": round(results[\"pearson\"], 2), \"spearmanr\": round(results[\"spearmanr\"], 2)})
{'pearson': 1.0, 'spearmanr': 1.0}
>>> glue_metric = datasets.load_metric('glue', 'cola')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'matthews_correlation': 1.0}
"""
def __UpperCAmelCase ( preds , labels ) -> float:
    """Fraction of positions where `preds` equals `labels` (numpy arrays)."""
    # BUG FIX: both parameters previously shared one name (a SyntaxError in
    # Python) while the body read `preds`/`labels`; the names are restored
    # from the body's own references.
    return float((preds == labels).mean() )
def __UpperCAmelCase ( preds , labels ) -> dict:
    """
    Return both plain accuracy and binary F1 for the given predictions.

    Args:
        preds: predicted labels (numpy array).
        labels: gold labels (numpy array).
    """
    # BUG FIX: the parameters previously shared one name (a SyntaxError) and
    # the helper `simple_accuracy` is not defined under that name in this
    # module, so the accuracy is computed inline with the same formula.
    acc = float((preds == labels).mean() )
    fa = float(fa_score(y_true=labels , y_pred=preds ) )
    return {
        "accuracy": acc,
        "f1": fa,
    }
def __UpperCAmelCase ( preds , labels ) -> dict:
    """
    Return the Pearson and Spearman correlations between two score vectors.

    Args:
        preds: predicted scores (sequence of floats).
        labels: gold scores (sequence of floats).
    """
    # BUG FIX: the parameters previously shared one name (a SyntaxError) and
    # both correlations were assigned to one throwaway local while the
    # return dict read `pearson_corr` / `spearman_corr`; names restored from
    # the body's own references.
    pearson_corr = float(pearsonr(preds , labels )[0] )
    spearman_corr = float(spearmanr(preds , labels )[0] )
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __SCREAMING_SNAKE_CASE ( datasets.Metric ):
    # GLUE metric implementation: dispatches on `self.config_name` to
    # accuracy, F1, Pearson/Spearman or Matthews correlation.
    # NOTE(review): `_DESCRIPTION`, `_KWARGS_DESCRIPTION`, `_CITATION` and
    # the helpers `pearson_and_spearman`, `acc_and_fa`, `simple_accuracy`
    # referenced below are not defined under those names in this obfuscated
    # module; `_compute` also declares two parameters with one name
    # (`lowerCamelCase`, a SyntaxError). Confirm against the upstream
    # datasets GLUE metric before relying on this code.
    def __UpperCamelCase ( self ) ->int:
        '''simple docstring'''
        # Metric metadata: predictions/references are int64 for the
        # classification subsets and float32 for the STS-B regression task.
        if self.config_name not in [
            "sst2",
            "mnli",
            "mnli_mismatched",
            "mnli_matched",
            "cola",
            "stsb",
            "mrpc",
            "qqp",
            "qnli",
            "rte",
            "wnli",
            "hans",
        ]:
            raise KeyError(
                'You should supply a configuration name selected in '
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    'predictions': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
                    'references': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
                } ) , codebase_urls=[] , reference_urls=[] , format='numpy' , )

    def __UpperCamelCase ( self , lowerCamelCase , lowerCamelCase ) ->Tuple:
        '''simple docstring'''
        # Route (predictions, references) to the scoring function that
        # matches the selected GLUE subset.
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(lowerCamelCase , lowerCamelCase )}
        elif self.config_name == "stsb":
            return pearson_and_spearman(lowerCamelCase , lowerCamelCase )
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_fa(lowerCamelCase , lowerCamelCase )
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(lowerCamelCase , lowerCamelCase )}
        else:
            raise KeyError(
                'You should supply a configuration name selected in '
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure for the MGP-STR model package: maps submodule name
# -> list of public names, consumed by `_LazyModule` below.
_import_structure = {
    '''configuration_mgp_str''': ['''MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MgpstrConfig'''],
    '''processing_mgp_str''': ['''MgpstrProcessor'''],
    '''tokenization_mgp_str''': ['''MgpstrTokenizer'''],
}
# Keep the original (obfuscated) module-level alias pointing at the same
# mapping object for any external reader.
__lowerCamelCase : List[Any] = _import_structure

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch-backed model classes are only exported when torch is installed.
    pass
else:
    # BUG FIX: these names were previously bound to a throwaway variable;
    # they must be registered under the "modeling_mgp_str" key so that
    # `_LazyModule` (which receives `_import_structure`) can resolve them.
    _import_structure['''modeling_mgp_str'''] = [
        '''MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''MgpstrModel''',
        '''MgpstrPreTrainedModel''',
        '''MgpstrForSceneTextRecognition''',
    ]

if TYPE_CHECKING:
    from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
    from .processing_mgp_str import MgpstrProcessor
    from .tokenization_mgp_str import MgpstrTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mgp_str import (
            MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
            MgpstrForSceneTextRecognition,
            MgpstrModel,
            MgpstrPreTrainedModel,
        )
else:
    import sys

    # BUG FIX: install the lazy proxy into sys.modules (previously the
    # result was bound to a plain variable and `_import_structure` was an
    # undefined name, raising NameError on import).
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 501 |
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class lowerCamelCase ( _lowerCamelCase ):
    '''simple docstring'''
    # JSON dataset reader built on the `datasets` Json packaged module:
    # either streams the split or downloads/prepares it into an
    # Arrow-backed Dataset.
    # NOTE(review): the base name `_lowerCamelCase` is not defined in this
    # module (upstream: AbstractDatasetReader, imported above); __init__
    # also declares many parameters under the single name `lowerCamelCase_`
    # (a SyntaxError), and `read()` passes that same unbound name for
    # several keyword arguments. Restore from the upstream
    # datasets.io.json module before relying on this code.
    def __init__( self : List[Any] , lowerCamelCase_ : NestedDataStructureLike[PathLike] , lowerCamelCase_ : Optional[NamedSplit] = None , lowerCamelCase_ : Optional[Features] = None , lowerCamelCase_ : str = None , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional[str] = None , lowerCamelCase_ : Optional[int] = None , **lowerCamelCase_ : str , ) -> Dict:
        super().__init__(
            lowerCamelCase_ , split=lowerCamelCase_ , features=lowerCamelCase_ , cache_dir=lowerCamelCase_ , keep_in_memory=lowerCamelCase_ , streaming=lowerCamelCase_ , num_proc=lowerCamelCase_ , **lowerCamelCase_ , )
        # Builds the underlying Json builder over the given data files.
        __magic_name__ : Optional[int] = field
        __magic_name__ : Optional[Any] = path_or_paths if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else {self.split: path_or_paths}
        __magic_name__ : Optional[Any] = Json(
            cache_dir=lowerCamelCase_ , data_files=lowerCamelCase_ , features=lowerCamelCase_ , field=lowerCamelCase_ , **lowerCamelCase_ , )

    def UpperCAmelCase__ ( self : Optional[Any] ) -> List[str]:
        # Build iterable dataset
        if self.streaming:
            __magic_name__ : Optional[Any] = self.builder.as_streaming_dataset(split=self.split )
        # Build regular (map-style) dataset
        else:
            __magic_name__ : int = None
            __magic_name__ : Tuple = None
            __magic_name__ : Tuple = None
            __magic_name__ : Optional[int] = None
            self.builder.download_and_prepare(
                download_config=lowerCamelCase_ , download_mode=lowerCamelCase_ , verification_mode=lowerCamelCase_ , base_path=lowerCamelCase_ , num_proc=self.num_proc , )
            __magic_name__ : Any = self.builder.as_dataset(
                split=self.split , verification_mode=lowerCamelCase_ , in_memory=self.keep_in_memory )
        return dataset
class lowerCamelCase :
    '''simple docstring'''
    # Serialises a `datasets.Dataset` to JSON via pandas `to_json`, either
    # sequentially or with a multiprocessing pool, to a path or buffer.
    # NOTE(review): obfuscation damage in this class - `__init__` binds its
    # attributes to a throwaway name instead of `self.*`, `_write` declares
    # duplicate parameter names (a SyntaxError), and `_batch_json`/`_write`
    # unpack their tuples into one repeated name while later lines read the
    # originals (`offset`, `orient`, `lines`, `index`, `batch`,
    # `to_json_kwargs`, `num_proc`). Restore from the upstream
    # datasets.io.json module before relying on this code.
    def __init__( self : int , lowerCamelCase_ : Dataset , lowerCamelCase_ : Union[PathLike, BinaryIO] , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : Optional[int] = None , **lowerCamelCase_ : List[Any] , ) -> List[Any]:
        if num_proc is not None and num_proc <= 0:
            raise ValueError(F'''num_proc {num_proc} must be an integer > 0.''' )
        __magic_name__ : Optional[Any] = dataset
        __magic_name__ : Tuple = path_or_buf
        __magic_name__ : Dict = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        __magic_name__ : Any = num_proc
        __magic_name__ : Tuple = '''utf-8'''
        __magic_name__ : Tuple = to_json_kwargs

    def UpperCAmelCase__ ( self : Dict ) -> int:
        # write(): resolve pandas to_json options, validate compression and
        # dispatch to `_write` against a path (via fsspec) or a buffer.
        __magic_name__ : int = self.to_json_kwargs.pop('''path_or_buf''' , lowerCamelCase_ )
        __magic_name__ : Any = self.to_json_kwargs.pop('''orient''' , '''records''' )
        __magic_name__ : List[Any] = self.to_json_kwargs.pop('''lines''' , True if orient == '''records''' else False )
        __magic_name__ : Optional[int] = self.to_json_kwargs.pop('''index''' , False if orient in ['''split''', '''table'''] else True )
        __magic_name__ : List[str] = self.to_json_kwargs.pop('''compression''' , lowerCamelCase_ )
        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(F'''`datasets` currently does not support {compression} compression''' )
        if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
            with fsspec.open(self.path_or_buf , '''wb''' , compression=lowerCamelCase_ ) as buffer:
                __magic_name__ : str = self._write(file_obj=lowerCamelCase_ , orient=lowerCamelCase_ , lines=lowerCamelCase_ , index=lowerCamelCase_ , **self.to_json_kwargs )
        else:
            if compression:
                raise NotImplementedError(
                    F'''The compression parameter is not supported when writing to a buffer, but compression={compression}'''
                    ''' was passed. Please provide a local path instead.''' )
            __magic_name__ : List[str] = self._write(
                file_obj=self.path_or_buf , orient=lowerCamelCase_ , lines=lowerCamelCase_ , index=lowerCamelCase_ , **self.to_json_kwargs )
        return written

    def UpperCAmelCase__ ( self : Optional[Any] , lowerCamelCase_ : Dict ) -> Optional[int]:
        # _batch_json(args): JSON-encode one batch of rows, guaranteeing a
        # trailing newline, and return the encoded bytes.
        __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : Any = args
        __magic_name__ : List[Any] = query_table(
            table=self.dataset.data , key=slice(lowerCamelCase_ , offset + self.batch_size ) , indices=self.dataset._indices , )
        __magic_name__ : Optional[Any] = batch.to_pandas().to_json(
            path_or_buf=lowerCamelCase_ , orient=lowerCamelCase_ , lines=lowerCamelCase_ , index=lowerCamelCase_ , **lowerCamelCase_ )
        if not json_str.endswith('''\n''' ):
            json_str += "\n"
        return json_str.encode(self.encoding )

    def UpperCAmelCase__ ( self : int , lowerCamelCase_ : BinaryIO , lowerCamelCase_ : List[str] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Any , **lowerCamelCase_ : Optional[Any] , ) -> int:
        # _write(): stream batches to `file_obj`, sequentially or via an
        # imap'd multiprocessing pool; returns the number of bytes written.
        __magic_name__ : Union[str, Any] = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0 , len(self.dataset ) , self.batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating json from Arrow format''' , ):
                __magic_name__ : List[str] = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
                written += file_obj.write(lowerCamelCase_ )
        else:
            __magic_name__ , __magic_name__ : Optional[Any] = len(self.dataset ), self.batch_size
            with multiprocessing.Pool(self.num_proc ) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , lowerCamelCase_ , lowerCamelCase_ )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating json from Arrow format''' , ):
                    written += file_obj.write(lowerCamelCase_ )
        return written
| 501 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure for the CLIPSeg package: maps submodule name ->
# list of public names, consumed by `_LazyModule` below.
_import_structure = {
    '''configuration_clipseg''': [
        '''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''CLIPSegConfig''',
        '''CLIPSegTextConfig''',
        '''CLIPSegVisionConfig''',
    ],
    '''processing_clipseg''': ['''CLIPSegProcessor'''],
}
# Keep the original (obfuscated) module-level alias pointing at the same
# mapping object for any external reader.
UpperCAmelCase : Optional[Any] = _import_structure

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch-backed model classes are only exported when torch is installed.
    pass
else:
    # BUG FIX: these names were previously bound to a throwaway variable;
    # they must be registered under the "modeling_clipseg" key so that
    # `_LazyModule` (which receives `_import_structure`) can resolve them.
    _import_structure['''modeling_clipseg'''] = [
        '''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''CLIPSegModel''',
        '''CLIPSegPreTrainedModel''',
        '''CLIPSegTextModel''',
        '''CLIPSegVisionModel''',
        '''CLIPSegForImageSegmentation''',
    ]

if TYPE_CHECKING:
    from .configuration_clipseg import (
        CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPSegConfig,
        CLIPSegTextConfig,
        CLIPSegVisionConfig,
    )
    from .processing_clipseg import CLIPSegProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clipseg import (
            CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPSegForImageSegmentation,
            CLIPSegModel,
            CLIPSegPreTrainedModel,
            CLIPSegTextModel,
            CLIPSegVisionModel,
        )
else:
    import sys

    # BUG FIX: install the lazy proxy into sys.modules (previously the
    # result was bound to a plain variable and `_import_structure` was an
    # undefined name, raising NameError on import).
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 239 | '''simple docstring'''
from __future__ import annotations
def _SCREAMING_SNAKE_CASE ( __snake_case : int | str ):
_A = str(__snake_case )
return n == n[::-1]
def _SCREAMING_SNAKE_CASE ( __snake_case : int = 1_0_0_0_0_0_0 ):
    """
    Project Euler 36: return the sum of all numbers below `__snake_case`
    that are palindromic in both base 10 and base 2.
    """

    # The palindrome check is inlined as a private helper because every
    # function in this module was (obfuscation-)renamed to one identifier,
    # so no `is_palindrome` name exists at call time; the accumulator was
    # also assigned to a throwaway name while the loop incremented `total`.
    def _is_pal(digits: str) -> bool:
        return digits == digits[::-1]

    total = 0
    for i in range(1 , __snake_case ):
        # bin(i) is "0b...", so strip the prefix before the palindrome test.
        if _is_pal(str(i )) and _is_pal(bin(i ).split('b' )[1] ):
            total += i
    return total
if __name__ == "__main__":
    # BUG FIX: this module's solver is (obfuscated-)named
    # `_SCREAMING_SNAKE_CASE`; `solution` does not exist here. The
    # redundant str() around input() is also dropped.
    print(_SCREAMING_SNAKE_CASE(int(input().strip())))
| 107 | 0 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
fastaa_timesteps,
smartaa_timesteps,
smartaa_timesteps,
smartaaa_timesteps,
smartaaa_timesteps,
superaa_timesteps,
superaa_timesteps,
superaaa_timesteps,
)
@dataclass
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
    # Output container for the IF diffusion pipelines.
    # NOTE(review): the base name `SCREAMING_SNAKE_CASE` is not defined in
    # this module (presumably `BaseOutput`, imported above) - confirm.
    # NOTE(review): the three class attributes below all share one name and
    # carry no type annotations, so `@dataclass` records no fields; this
    # looks like obfuscation damage to the original output fields.
    __UpperCamelCase =42
    __UpperCamelCase =42
    __UpperCamelCase =42
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
from .pipeline_if_imgaimg import IFImgaImgPipeline
from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
| 673 |
import heapq
import sys
import numpy as np
a_ : Optional[int] = tuple[int, int]
class UpperCamelCase :
    # Min-heap priority queue with a mirror set for O(1) membership tests;
    # supports priority updates and removal of arbitrary items by draining
    # the heap and pushing survivors back.
    # NOTE(review): obfuscation damage - the third method declares two
    # parameters with one name (`snake_case__`, a SyntaxError), __init__
    # binds its containers to a throwaway name instead of
    # `self.elements`/`self.set`, and several unpacks discard values that
    # following lines read (`pri`, `pro`, `priority`, `item`, `x`).
    # Restore from the upstream implementation before use.
    def __init__( self : Dict ):
        """simple docstring"""
        # Heap of (priority, item) pairs plus a membership-mirror set.
        SCREAMING_SNAKE_CASE = []
        SCREAMING_SNAKE_CASE = set()

    def UpperCamelCase ( self : List[Any] ):
        """simple docstring"""
        # Smallest priority currently queued, or +inf when the heap is empty.
        if not self.empty():
            return self.elements[0][0]
        else:
            return float('inf' )

    def UpperCamelCase ( self : List[str] ):
        """simple docstring"""
        # True when nothing is queued.
        return len(self.elements ) == 0

    def UpperCamelCase ( self : Union[str, Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any] ):
        """simple docstring"""
        # Push a new (priority, item); if the item is already queued, drain
        # the heap until it is found and re-push it with the new priority.
        if item not in self.set:
            heapq.heappush(self.elements , (priority, item) )
            self.set.add(snake_case__ )
        else:
            # update
            # print("update", item)
            SCREAMING_SNAKE_CASE = []
            ((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = heapq.heappop(self.elements )
            while x != item:
                temp.append((pri, x) )
                ((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = heapq.heappop(self.elements )
            temp.append((priority, item) )
            for pro, xxx in temp:
                heapq.heappush(self.elements , (pro, xxx) )

    def UpperCamelCase ( self : Dict , snake_case__ : Dict ):
        """simple docstring"""
        # Remove one item, re-pushing everything popped before it.
        # NOTE(review): if the item is not on the heap this drains to empty
        # and heappop raises IndexError.
        if item in self.set:
            self.set.remove(snake_case__ )
        SCREAMING_SNAKE_CASE = []
        ((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = heapq.heappop(self.elements )
        while x != item:
            temp.append((pro, x) )
            ((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = heapq.heappop(self.elements )
        for prito, yyy in temp:
            heapq.heappush(self.elements , (prito, yyy) )

    def UpperCamelCase ( self : str ):
        """simple docstring"""
        # Peek: item with the smallest priority, without removing it.
        return self.elements[0][1]

    def UpperCamelCase ( self : Tuple ):
        """simple docstring"""
        # Pop and return (priority, item), removing it from the mirror set.
        ((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = heapq.heappop(self.elements )
        self.set.remove(snake_case__ )
        return (priority, item)
def __lowerCAmelCase ( P: "TPos" , GOAL: "TPos" ) -> float:
    '''
    Euclidean distance between the two grid positions `P` and `GOAL`
    (the consistent heuristic for the multi-heuristic A* below).
    '''
    # BUG FIX: both parameters previously shared one name (a SyntaxError in
    # Python), so both arrays were built from the same value; the
    # annotations are strings because `TPos` is not a resolvable name in
    # this obfuscated module.
    a = np.array(P )
    b = np.array(GOAL )
    return np.linalg.norm(a - b )
def __lowerCAmelCase ( P: "TPos" , GOAL: "TPos" ) -> int:
    '''
    Inconsistent heuristic: the Euclidean distance scaled down by the
    module-level counter `t` (floor division).
    '''
    # BUG FIX: duplicate parameter names were a SyntaxError.
    # NOTE(review): `consistent_heuristic` and the global `t` are not
    # defined under these names in this obfuscated module - restore the
    # module before calling this function.
    return consistent_heuristic(P , GOAL ) // t
def __lowerCAmelCase ( p: "TPos" , goal: "TPos" ) -> int:
    '''Manhattan distance between grid positions `p` and `goal`.'''
    # BUG FIX: the parameters previously shared one (obfuscated) name, a
    # SyntaxError, while the body already referenced `p` and `goal`; the
    # names are restored from those references.
    return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def __lowerCAmelCase ( start: "TPos" , i: int , goal: "TPos" , g_function: dict ) -> float:
    '''
    A* priority key: g(start) + Wa * h_i(start, goal), where `Wa` and the
    `heuristics` dispatch table are module-level globals.
    '''
    # BUG FIX: all four parameters previously shared one name (a
    # SyntaxError); the names are restored from the references in the body.
    # NOTE(review): `Wa` and `heuristics` are not defined under these names
    # in this obfuscated module - restore the module globals before use.
    ans = g_function[start] + Wa * heuristics[i](start , goal )
    return ans
def __lowerCAmelCase ( _UpperCamelCase : List[str] , _UpperCamelCase : int , _UpperCamelCase : Tuple ) -> List[str]:
    '''simple docstring'''
    # Renders the solved grid as ASCII art, walks the back-pointer chain to
    # print the found path, and exits the process.
    # NOTE(review): obfuscation damage - the three parameters share one name
    # (a SyntaxError; upstream: back_pointer, goal, start), most locals are
    # bound to a throwaway name while later lines read `grid`/`x`, and
    # `n`/`blocks` are module globals that are also mangled. Restore from
    # the upstream multi-heuristic A* module before use.
    SCREAMING_SNAKE_CASE = np.chararray((n, n) )
    for i in range(_UpperCamelCase ):
        for j in range(_UpperCamelCase ):
            SCREAMING_SNAKE_CASE = '*'

    for i in range(_UpperCamelCase ):
        for j in range(_UpperCamelCase ):
            if (j, (n - 1) - i) in blocks:
                SCREAMING_SNAKE_CASE = '#'

    SCREAMING_SNAKE_CASE = '-'
    SCREAMING_SNAKE_CASE = back_pointer[goal]
    while x != start:
        ((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = x
        # print(x)
        SCREAMING_SNAKE_CASE = '-'
        SCREAMING_SNAKE_CASE = back_pointer[x]
    SCREAMING_SNAKE_CASE = '-'

    for i in range(_UpperCamelCase ):
        for j in range(_UpperCamelCase ):
            if (i, j) == (0, n - 1):
                print(grid[i][j] , end=' ' )
                print('<-- End position' , end=' ' )
            else:
                print(grid[i][j] , end=' ' )
        print()
    print('^' )
    print('Start position' )
    print()
    print('# is an obstacle' )
    print('- is the path taken by algorithm' )
    print('PATH TAKEN BY THE ALGORITHM IS:-' )
    SCREAMING_SNAKE_CASE = back_pointer[goal]
    while x != start:
        print(_UpperCamelCase , end=' ' )
        SCREAMING_SNAKE_CASE = back_pointer[x]
    print(_UpperCamelCase )
    sys.exit()
def __lowerCAmelCase ( p: "TPos" ) -> bool:
    '''Return True when `p` lies inside the n x n grid (module global `n`).'''
    # BUG FIX: the parameter was (obfuscation-)renamed while the body kept
    # reading `p`; the name is restored from those references.
    # NOTE(review): the grid size `n` is a module global that is also
    # mangled in this file - restore it before calling.
    if p[0] < 0 or p[0] > n - 1:
        return False
    if p[1] < 0 or p[1] > n - 1:
        return False
    return True
def __lowerCAmelCase ( _UpperCamelCase : List[Any] , _UpperCamelCase : Tuple , _UpperCamelCase : Any , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Tuple , _UpperCamelCase : List[Any] , _UpperCamelCase : Optional[Any] , ) -> List[Any]:
    '''simple docstring'''
    # Expands state `s`: removes it from every open list, then relaxes its
    # four grid neighbours, re-queuing improved ones under each heuristic.
    # NOTE(review): obfuscation damage - all eight parameters share one name
    # (a SyntaxError; upstream: s, j, visited, g_function,
    # close_list_anchor, close_list_inad, open_list, back_pointer), several
    # assignments go to a throwaway name while later lines read
    # `left`/`right`/`up`/`down`/`x`/`y`, and `valid`/`key`/`blocks`/`Wa`
    # are not defined under these names. Restore from the upstream module.
    for itera in range(_UpperCamelCase ):
        open_list[itera].remove_element(_UpperCamelCase )
    # print("s", s)
    # print("j", j)
    ((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = s
    SCREAMING_SNAKE_CASE = (x - 1, y)
    SCREAMING_SNAKE_CASE = (x + 1, y)
    SCREAMING_SNAKE_CASE = (x, y + 1)
    SCREAMING_SNAKE_CASE = (x, y - 1)

    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(_UpperCamelCase ) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(_UpperCamelCase )
                SCREAMING_SNAKE_CASE = -1
                SCREAMING_SNAKE_CASE = float('inf' )
                if valid(_UpperCamelCase ) and g_function[neighbours] > g_function[s] + 1:
                    SCREAMING_SNAKE_CASE = g_function[s] + 1
                    SCREAMING_SNAKE_CASE = s
                    if neighbours not in close_list_anchor:
                        open_list[0].put(_UpperCamelCase , key(_UpperCamelCase , 0 , _UpperCamelCase , _UpperCamelCase ) )
                    if neighbours not in close_list_inad:
                        for var in range(1 , _UpperCamelCase ):
                            if key(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) <= Wa * key(
                                _UpperCamelCase , 0 , _UpperCamelCase , _UpperCamelCase ):
                                open_list[j].put(
                                    _UpperCamelCase , key(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) )
def __lowerCAmelCase ( ) -> list:
    '''
    Assemble the list of blocked grid cells used by the demo: several
    rectangular regions plus one short horizontal strip. Duplicate cells
    from overlapping regions are kept, matching the original behaviour.
    '''
    # BUG FIX: the previous return annotation referenced `Tuple`, which is
    # not an importable name in this module and raised NameError at
    # function-definition time; the function returns a plain list.
    blocked = []
    # 4 x 5 rectangle near the origin.
    blocked.extend((x, y) for x in range(1 , 5 ) for y in range(1 , 6 ))
    # Short horizontal strip at row 17.
    blocked.extend((x, 17) for x in range(15 , 20 ))
    # Large central rectangle.
    blocked.extend((x, y) for x in range(10 , 19 ) for y in range(1 , 15 ))
    # L block
    blocked.extend((x, y) for x in range(1 , 4 ) for y in range(12 , 19 ))
    blocked.extend((x, y) for x in range(3 , 13 ) for y in range(16 , 19 ))
    return blocked
# Heuristic dispatch table and demo configuration for the multi-heuristic
# A* run below.
# NOTE(review): obfuscation damage - `consistent_heuristic`, `heuristic_a`,
# `make_common_ground` and `blocks_blk` are not defined under these names
# in this module, and every constant below is assigned to the same name
# `a_`, so only the final assignment survives. Upstream these are:
# heuristics, blocks_blk, blocks_all(?), blocks, Wa, W2, n, n_heuristic,
# start, goal, t - restore before running.
a_ : str = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
a_ : List[str] = [
    (0, 1),
    (1, 1),
    (2, 1),
    (3, 1),
    (4, 1),
    (5, 1),
    (6, 1),
    (7, 1),
    (8, 1),
    (9, 1),
    (10, 1),
    (11, 1),
    (12, 1),
    (13, 1),
    (14, 1),
    (15, 1),
    (16, 1),
    (17, 1),
    (18, 1),
    (19, 1),
]
a_ : Union[str, Any] = make_common_ground()
a_ : Tuple = blocks_blk
# hyper parameters
a_ : Any = 1
a_ : List[str] = 1
a_ : Union[str, Any] = 20
a_ : Optional[Any] = 3 # one consistent and two other inconsistent
# start and end destination
a_ : int = (0, 0)
a_ : Optional[int] = (n - 1, n - 1)
a_ : Union[str, Any] = 1
def __lowerCAmelCase ( _UpperCamelCase : TPos , _UpperCamelCase : TPos , _UpperCamelCase : int ) -> List[Any]:
    '''simple docstring'''
    # Multi-heuristic A*: one anchor (consistent) queue plus n_heuristic-1
    # inadmissible queues; expands from whichever queue is promising and
    # prints the grid when no path is found.
    # NOTE(review): obfuscation damage - the three parameters share one name
    # (a SyntaxError; upstream: start, goal, n_heuristic), most locals are
    # bound to a throwaway name while later lines read
    # `open_list`/`visited`/`g_function`/`close_list_*`, and
    # `PriorityQueue`/`key`/`do_something`/`expand_state`/`Wa`/`t`/`n`/
    # `blocks`/`back_pointer` are not defined under these names in this
    # module. Restore from the upstream multi-heuristic A* implementation.
    SCREAMING_SNAKE_CASE = {start: 0, goal: float('inf' )}
    SCREAMING_SNAKE_CASE = {start: -1, goal: -1}
    SCREAMING_SNAKE_CASE = []
    SCREAMING_SNAKE_CASE = set()

    for i in range(_UpperCamelCase ):
        open_list.append(PriorityQueue() )
        open_list[i].put(_UpperCamelCase , key(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) )

    SCREAMING_SNAKE_CASE = []
    SCREAMING_SNAKE_CASE = []
    while open_list[0].minkey() < float('inf' ):
        for i in range(1 , _UpperCamelCase ):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= Wa * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float('inf' ):
                        do_something(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
                else:
                    SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = open_list[i].top_show()
                    visited.add(_UpperCamelCase )
                    expand_state(
                        _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , )
                    close_list_inad.append(_UpperCamelCase )
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float('inf' ):
                        do_something(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
                    else:
                        SCREAMING_SNAKE_CASE = open_list[0].top_show()
                        visited.add(_UpperCamelCase )
                        expand_state(
                            _UpperCamelCase , 0 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , )
                        close_list_anchor.append(_UpperCamelCase )
    print('No path found to goal' )
    print()
    for i in range(n - 1 , -1 , -1 ):
        for j in range(_UpperCamelCase ):
            if (j, i) in blocks:
                print('#' , end=' ' )
            elif (j, i) in back_pointer:
                if (j, i) == (n - 1, n - 1):
                    print('*' , end=' ' )
                else:
                    print('-' , end=' ' )
            else:
                print('*' , end=' ' )
            if (j, i) == (n - 1, n - 1):
                print('<-- End position' , end=' ' )
        print()
    print('^' )
    print('Start position' )
    print()
    print('# is an obstacle' )
    print('- is the path taken by algorithm' )


if __name__ == "__main__":
    # NOTE(review): `multi_a_star`, `start`, `goal` and `n_heuristic` are
    # not defined under these names in this obfuscated module.
    multi_a_star(start, goal, n_heuristic)
| 673 | 1 |
"""simple docstring"""
def __A ( current_set: list[list] ) -> list[list]:
    """
    One round of Gaussian elimination: normalise every row by its leading
    coefficient, subtract the first row from the others to zero out their
    first column, and recurse on the remaining sub-matrix until each row is
    reduced to length 3 (two coefficients plus the constant term).

    BUG FIXES vs the previous version: the recursive call targeted a name
    that does not exist in this module (every function here shares one
    obfuscated name) - recursion now goes through a private inner helper -
    and the normalisation loop iterated over the whole matrix instead of
    over each row's entries. The input rows are no longer mutated; a
    reduced copy is returned.
    """

    def _reduce(rows: list[list]) -> list[list]:
        # Copy each row so callers' equation lists are left untouched.
        dup = [row[:] for row in rows]
        # Divide each row by the magnitude of its first term.
        for row_index, row in enumerate(dup ):
            magnitude = row[0]
            for column_index, column in enumerate(row ):
                if magnitude == 0:
                    dup[row_index][column_index] = column
                    continue
                dup[row_index][column_index] = column / magnitude
        # Subtract the first row from every other row to cancel the term.
        first_row = dup[0]
        final_set = [first_row]
        for row in dup[1:]:
            # If first term is 0, it is already in the form we want.
            if row[0] == 0:
                final_set.append(row )
                continue
            final_set.append(
                [first_row[column_index] - row[column_index] for column_index in range(len(row ) )] )
        # Recurse on the sub-matrix (drop the first column) until rows
        # shrink to [coefficient, coefficient, constant].
        if len(final_set[0] ) != 3:
            head_row = final_set[0]
            current_first_column = []
            next_iteration = []
            for row in final_set[1:]:
                current_first_column.append(row[0] )
                next_iteration.append(row[1:] )
            resultant = _reduce(next_iteration )
            for i in range(len(resultant ) ):
                resultant[i].insert(0 , current_first_column[i] )
            resultant.insert(0 , head_row )
            final_set = resultant
        return final_set

    return _reduce(current_set )
def __A ( a_ : list[list] )-> list:
'''simple docstring'''
if len(a_ ) == 0:
raise IndexError('''solve_simultaneous() requires n lists of length n+1''' )
SCREAMING_SNAKE_CASE : Dict = len(a_ ) + 1
if any(len(a_ ) != _length for item in equations ):
raise IndexError('''solve_simultaneous() requires n lists of length n+1''' )
for row in equations:
if any(not isinstance(a_ , (int, float) ) for column in row ):
raise ValueError('''solve_simultaneous() requires lists of integers''' )
if len(a_ ) == 1:
return [equations[0][-1] / equations[0][0]]
SCREAMING_SNAKE_CASE : Dict = equations.copy()
if any(0 in row for row in data_set ):
SCREAMING_SNAKE_CASE : Union[str, Any] = data_set.copy()
SCREAMING_SNAKE_CASE : Any = []
for row_index, row in enumerate(a_ ):
if 0 not in row:
SCREAMING_SNAKE_CASE : Any = data_set.pop(a_ )
break
if not full_row:
raise ValueError('''solve_simultaneous() requires at least 1 full equation''' )
data_set.insert(0 , a_ )
SCREAMING_SNAKE_CASE : Optional[int] = data_set.copy()
SCREAMING_SNAKE_CASE : List[Any] = simplify(a_ )
SCREAMING_SNAKE_CASE : Tuple = simplified[::-1]
SCREAMING_SNAKE_CASE : list = []
for row in simplified:
SCREAMING_SNAKE_CASE : Union[str, Any] = row[-1]
if not solutions:
if row[-2] == 0:
solutions.append(0 )
continue
solutions.append(current_solution / row[-2] )
continue
SCREAMING_SNAKE_CASE : Union[str, Any] = row.copy()[: len(a_ ) - 1 :]
while temp_row[0] == 0:
temp_row.pop(0 )
if len(a_ ) == 0:
solutions.append(0 )
continue
SCREAMING_SNAKE_CASE : Optional[Any] = temp_row[1::]
SCREAMING_SNAKE_CASE : Optional[int] = temp_row[::-1]
for column_index, column in enumerate(a_ ):
current_solution -= column * solutions[column_index]
solutions.append(a_ )
SCREAMING_SNAKE_CASE : int = []
for item in solutions:
final.append(float(round(a_ , 5 ) ) )
return final[::-1]
if __name__ == "__main__":
    # Run the module doctests, then demonstrate the solver on two examples.
    import doctest

    doctest.testmod()
    eq = [
        [2, 1, 1, 1, 1, 4],
        [1, 2, 1, 1, 1, 5],
        [1, 1, 2, 1, 1, 6],
        [1, 1, 1, 2, 1, 7],
        [1, 1, 1, 1, 2, 8],
    ]
    print(solve_simultaneous(eq))
    print(solve_simultaneous([[4, 2]]))
| 698 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowercase__( DiffusionPipeline ):
    """Unconditional image-generation pipeline for score-based generative models
    (variance-exploding SDE formulation).

    Alternates Langevin-dynamics correction steps with reverse-SDE prediction
    steps driven by a :class:`ScoreSdeVeScheduler`.
    """

    # Components registered via `register_modules` in `__init__`.
    unet: UNetaDModel
    scheduler: ScoreSdeVeScheduler

    def __init__( self :List[str] , unet :UNetaDModel , scheduler :ScoreSdeVeScheduler ) -> None:
        """Store the denoising U-Net and the VE-SDE scheduler on the pipeline."""
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler)

    @torch.no_grad()
    def __call__( self :int , batch_size :int = 1 , num_inference_steps :int = 20_00 , generator :Optional[Union[torch.Generator, List[torch.Generator]]] = None , output_type :Optional[str] = "pil" , return_dict :bool = True , **kwargs :Union[str, Any] , ) -> Union[ImagePipelineOutput, Tuple]:
        """Generate ``batch_size`` images.

        Args:
            batch_size: number of images to generate.
            num_inference_steps: number of denoising steps.
            generator: optional RNG for reproducible sampling.
            output_type: "pil" for PIL images, anything else for a numpy array.
            return_dict: if False, return a plain tuple instead of
                :class:`ImagePipelineOutput`.
        """
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet
        # Start from pure noise scaled to the scheduler's initial sigma.
        sample = randn_tensor(shape , generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)
        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device)
            # correction step (Langevin dynamics)
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample , sigma_t).sample
                sample = self.scheduler.step_correct(model_output , sample , generator=generator).prev_sample
            # prediction step (reverse SDE)
            model_output = model(sample , sigma_t).sample
            output = self.scheduler.step_pred(model_output , t , sample , generator=generator)
            sample, sample_mean = output.prev_sample, output.prev_sample_mean
        # Use the noise-free mean of the final step as the generated image.
        sample = sample_mean.clamp(0 , 1)
        sample = sample.cpu().permute(0 , 2 , 3 , 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)
        if not return_dict:
            return (sample,)
        return ImagePipelineOutput(images=sample)
| 698 | 1 |
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class SCREAMING_SNAKE_CASE:
    """Reusable harness exercising the common `PretrainedConfig` contract.

    A concrete test case passes itself as ``parent`` (for assertions), the
    config class under test and the kwargs needed to build it; calling
    ``run_common_tests`` then drives every shared check.
    """

    def __init__( self: Dict , parent: Dict , config_class: List[str]=None , has_text_modality: Optional[Any]=True , common_properties: str=None , **kwargs: List[str] ) -> Any:
        self.parent = parent                        # unittest.TestCase used for assertions
        self.config_class = config_class            # config class under test
        self.has_text_modality = has_text_modality  # text models also expose `vocab_size`
        self.inputs_dict = kwargs                   # kwargs used to instantiate the config
        self.common_properties = common_properties  # optional override of the default property list

    def create_and_test_config_common_properties( self: int ) -> None:
        """The config exposes the common properties as both getters and setters."""
        config = self.config_class(**self.inputs_dict )
        common_properties = (
            ['''hidden_size''', '''num_attention_heads''', '''num_hidden_layers''']
            if self.common_properties is None
            else self.common_properties
        )
        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(['''vocab_size'''] )
        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config , prop ) , msg=f"""`{prop}` does not exist""" )
        # Test that config has the common properties as setter
        for idx, name in enumerate(common_properties ):
            try:
                setattr(config , name , idx )
                self.parent.assertEqual(
                    getattr(config , name ) , idx , msg=f"""`{name} value {idx} expected, but was {getattr(config , name )}""" )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass
        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(common_properties ):
            try:
                config = self.config_class(**{name: idx} )
                self.parent.assertEqual(
                    getattr(config , name ) , idx , msg=f"""`{name} value {idx} expected, but was {getattr(config , name )}""" )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

    def create_and_test_config_to_json_string( self: Tuple ) -> None:
        """`to_json_string` round-trips every constructor kwarg."""
        config = self.config_class(**self.inputs_dict )
        obj = json.loads(config.to_json_string() )
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key] , value )

    def create_and_test_config_to_json_file( self: Union[str, Any] ) -> None:
        """`to_json_file` / `from_json_file` round-trip preserves the config."""
        config_first = self.config_class(**self.inputs_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname , '''config.json''' )
            config_first.to_json_file(json_file_path )
            config_second = self.config_class.from_json_file(json_file_path )
        self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )

    def create_and_test_config_from_and_save_pretrained( self: str ) -> None:
        """`save_pretrained` / `from_pretrained` round-trip preserves the config."""
        config_first = self.config_class(**self.inputs_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname )
            config_second = self.config_class.from_pretrained(tmpdirname )
        self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )

    def create_and_test_config_from_and_save_pretrained_subfolder( self: Optional[Any] ) -> None:
        """`from_pretrained` also loads from a `subfolder` of the save directory."""
        config_first = self.config_class(**self.inputs_dict )
        subfolder = '''test'''
        with tempfile.TemporaryDirectory() as tmpdirname:
            sub_configs_path = os.path.join(tmpdirname , subfolder )
            config_first.save_pretrained(sub_configs_path )
            config_second = self.config_class.from_pretrained(tmpdirname , subfolder=subfolder )
        self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )

    def create_and_test_config_with_num_labels( self: int ) -> None:
        """`num_labels` keeps `id2label` / `label2id` in sync."""
        config = self.config_class(**self.inputs_dict , num_labels=5 )
        self.parent.assertEqual(len(config.id2label ) , 5 )
        self.parent.assertEqual(len(config.label2id ) , 5 )
        config.num_labels = 3
        self.parent.assertEqual(len(config.id2label ) , 3 )
        self.parent.assertEqual(len(config.label2id ) , 3 )

    def check_config_can_be_init_without_params( self: List[str] ) -> None:
        """Non-composite configs must be constructible with no arguments."""
        if self.config_class.is_composition:
            return
        config = self.config_class()
        self.parent.assertIsNotNone(config )

    def check_config_arguments_init( self: Dict ) -> None:
        """Every key in `config_common_kwargs` must be honoured by the constructor."""
        kwargs = copy.deepcopy(config_common_kwargs )
        config = self.config_class(**kwargs )
        wrong_values = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch

                    if config.torch_dtype != torch.float16:
                        wrong_values.append(('''torch_dtype''', config.torch_dtype, torch.float16) )
            elif getattr(config , key ) != value:
                wrong_values.append((key, getattr(config , key ), value) )
        if len(wrong_values ) > 0:
            errors = '''\n'''.join([f"""- {v[0]}: got {v[1]} instead of {v[2]}""" for v in wrong_values] )
            raise ValueError(f"""The following keys were not properly set in the config:\n{errors}""" )

    def run_common_tests( self: List[Any] ) -> None:
        """Run every shared config check in sequence."""
        self.create_and_test_config_common_properties()
        self.create_and_test_config_to_json_string()
        self.create_and_test_config_to_json_file()
        self.create_and_test_config_from_and_save_pretrained()
        self.create_and_test_config_from_and_save_pretrained_subfolder()
        self.create_and_test_config_with_num_labels()
        self.check_config_can_be_init_without_params()
        self.check_config_arguments_init()
| 62 |
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class SCREAMING_SNAKE_CASE ( TokenizerTesterMixin , unittest.TestCase ):
    """Tests for `BlenderbotSmallTokenizer` (BPE over a tiny fixture vocab,
    plus slow checks against the hosted facebook/blenderbot-90M tokenizer)."""

    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False  # no fast (Rust) tokenizer exists for this model

    def setUp( self: List[str] ) -> None:
        """Write a minimal vocab/merges pair into the mixin's temp dir."""
        super().setUp()
        vocab = ['''__start__''', '''adapt''', '''act''', '''ap@@''', '''te''', '''__end__''', '''__unk__''']
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['''#version: 0.2''', '''a p''', '''t e</w>''', '''ap t</w>''', '''a d''', '''ad apt</w>''', '''a c''', '''ac t</w>''', '''''']
        self.special_tokens_map = {'''unk_token''': '''__unk__''', '''bos_token''': '''__start__''', '''eos_token''': '''__end__'''}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(merges ) )

    def get_tokenizer( self: str , **kwargs: Optional[Any] ) -> BlenderbotSmallTokenizer:
        """Load a tokenizer from the fixture files, forcing the special tokens."""
        kwargs.update(self.special_tokens_map )
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **kwargs )

    def get_input_output_texts( self: str , tokenizer: List[str] ) -> tuple:
        """Sample (input, expected decoded output) pair for the common tests."""
        input_text = '''adapt act apte'''
        output_text = '''adapt act apte'''
        return input_text, output_text

    def test_full_tokenizer( self: Union[str, Any] ) -> None:
        """Tokenization and token→id conversion on the fixture vocab."""
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = '''adapt act apte'''
        bpe_tokens = ['''adapt''', '''act''', '''ap@@''', '''te''']
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )

    def test_special_tokens_small_tok( self: Any ) -> None:
        """Round-trip through the hosted 90M tokenizer lower-cases and re-spaces."""
        tok = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
        assert tok('''sam''' ).input_ids == [13_84]
        src_text = '''I am a small frog.'''
        encoded = tok([src_text] , padding=False , truncation=True )['''input_ids''']
        decoded = tok.batch_decode(encoded , skip_special_tokens=True , clean_up_tokenization_spaces=True )[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def test_empty_word_small_tok( self: Any ) -> None:
        """A trailing token encodes the same standalone or at the end of a sentence."""
        tok = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
        src_text = '''I am a small frog .'''
        src_text_dot = '''.'''
        encoded = tok(src_text )['''input_ids''']
        encoded_dot = tok(src_text_dot )['''input_ids''']
        assert encoded[-1] == encoded_dot[0]
| 62 | 1 |
'''simple docstring'''
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 3_00  # TEMPERATURE (unit = K)


def builtin_voltage(
    donor_conc: float,  # donor concentration (per cm^3)
    acceptor_conc: float,  # acceptor concentration (per cm^3)
    intrinsic_conc: float,  # intrinsic concentration (per cm^3)
) -> float:
    """Return the built-in voltage (in volts) of a pn-junction at temperature T.

    V_bi = (k_B * T / q) * ln(N_d * N_a / n_i^2)

    Raises:
        ValueError: if any concentration is non-positive, or if a doping
            concentration does not exceed the intrinsic concentration.
    """
    if donor_conc <= 0:
        raise ValueError("Donor concentration should be positive" )
    elif acceptor_conc <= 0:
        raise ValueError("Acceptor concentration should be positive" )
    elif intrinsic_conc <= 0:
        raise ValueError("Intrinsic concentration should be positive" )
    elif donor_conc <= intrinsic_conc:
        raise ValueError(
            "Donor concentration should be greater than intrinsic concentration" )
    elif acceptor_conc <= intrinsic_conc:
        raise ValueError(
            "Acceptor concentration should be greater than intrinsic concentration" )
    else:
        return (
            Boltzmann
            * T
            * log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
            / physical_constants["electron volt"][0]
        )
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 508 |
'''simple docstring'''
def heaps(arr: list) -> list:
    """Return all permutations of *arr* using the iterative Heap's algorithm.

    Each permutation is produced by a single adjacent swap of the previous one.
    Note: *arr* is permuted in place while the result list is built.
    """
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(n: int, arr: list):
        # c encodes the loop counters of the recursive formulation.
        c = [0] * n
        res.append(tuple(arr))

        i = 0
        while i < n:
            if c[i] < i:
                # Swap position depends on the parity of i (Heap's rule).
                if i % 2 == 0:
                    arr[0], arr[i] = arr[i], arr[0]
                else:
                    arr[c[i]], arr[i] = arr[i], arr[c[i]]
                res.append(tuple(arr))
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1

    generate(len(arr), arr)
    return res
if __name__ == "__main__":
    # Read a comma-separated list of integers and print all its permutations.
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    arr = [int(item) for item in user_input.split(''',''')]
    print(heaps(arr))
| 508 | 1 |
"""simple docstring"""
from __future__ import annotations
import numpy as np
from numpy import floataa
from numpy.typing import NDArray
def jacobi_iteration_method(
    coefficient_matrix: NDArray[floataa],
    constant_matrix: NDArray[floataa],
    init_val: list[int],
    iterations: int,
) -> list[float]:
    """Solve Ax = b iteratively with the Jacobi method.

    Args:
        coefficient_matrix: n x n, strictly diagonally dominant matrix A.
        constant_matrix: n x 1 right-hand side b.
        init_val: initial guess for the n unknowns.
        iterations: number of Jacobi sweeps to perform (must be >= 1).

    Returns:
        The approximate solution vector after the requested iterations.

    Raises:
        ValueError: on any dimension mismatch, non-positive iteration count,
            or a coefficient matrix that is not strictly diagonally dominant.
    """
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f'''Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}'''
        raise ValueError(msg)

    if cols2 != 1:
        msg = f'''Constant matrix must be nx1 but received {rows2}x{cols2}'''
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            '''Coefficient and constant matrices dimensions must be nxn and nx1 but '''
            f'''received {rows1}x{cols1} and {rows2}x{cols2}'''
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            '''Number of initial values must be equal to number of rows in coefficient '''
            f'''matrix but received {len(init_val)} and {rows1}'''
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError('''Iterations must be at least 1''')

    # Work on the augmented matrix [A | b].
    table = np.concatenate((coefficient_matrix, constant_matrix), axis=1)
    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]  # diagonal entry a_ii
                elif col == cols - 1:
                    val = table[row][col]  # right-hand side b_i
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[floataa]) -> bool:
    """Return True if the augmented matrix's coefficient part is strictly
    diagonally dominant; raise ValueError otherwise.

    The last column (the constants) is excluded from the row sums.
    """
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]

        if table[i][i] <= total:
            raise ValueError('''Coefficient matrix is not strictly diagonally dominant''')

    return is_diagonally_dominant
# Test Cases
# Test Cases
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 342 | """simple docstring"""
from ..utils import DummyObject, requires_backends
class UpperCAmelCase_ ( metaclass=DummyObject):
    """Placeholder raising an informative error when torch/scipy are missing.

    `DummyObject` reads `_backends` so any attribute access also reports the
    missing backends instead of failing with an AttributeError.
    """

    _backends = ['''torch''', '''scipy''']

    def __init__( self : List[Any] , *args : int , **kwargs : Any ) -> Any:
        requires_backends(self , ['''torch''', '''scipy'''] )

    @classmethod
    def from_config( cls : Tuple , *args : Optional[Any] , **kwargs : List[Any] ) -> str:
        requires_backends(cls , ['''torch''', '''scipy'''] )

    @classmethod
    def from_pretrained( cls : str , *args : Any , **kwargs : int ) -> int:
        requires_backends(cls , ['''torch''', '''scipy'''] )
| 342 | 1 |
"""simple docstring"""
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class lowercase__ ( unittest.TestCase ):
    """Launches the accelerate test script on all TPU cores via xla_spawn."""

    def setUp( self : int ) -> None:
        """Locate the bundled test script and the directory holding xla_spawn.py."""
        mod_file = inspect.getfile(accelerate.test_utils )
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_script.py"] )
        self.test_dir = os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] )

    @require_tpu
    def test_tpu( self : Any ) -> None:
        """Run the test script on 8 TPU cores as a subprocess."""
        distributed_args = F"""
        {self.test_dir}/xla_spawn.py
        --num_cores 8
        {self.test_file_path}
        """.split()
        cmd = [sys.executable] + distributed_args
        execute_subprocess_async(cmd , env=os.environ.copy() )
| 82 |
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parameters
OUTPUT_SIZE = (7_2_0, 1_2_8_0)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 1_0_0  # drop boxes smaller than this fraction of the mosaic
LABEL_DIR = ""  # directory with YOLO-format .txt label files
IMG_DIR = ""  # directory with matching .jpg images
OUTPUT_DIR = ""  # where mosaics and their labels are written
NUMBER_IMAGES = 2_5_0  # how many mosaic images to generate
def main() ->None:
    """Generate NUMBER_IMAGES mosaic-augmented images plus YOLO label files.

    Each mosaic stitches four randomly chosen dataset images and rewrites their
    corner-format boxes back to YOLO center/width/height format.
    """
    img_paths, annos = get_dataset(LABEL_DIR , IMG_DIR )
    for index in range(NUMBER_IMAGES ):
        idxs = random.sample(range(len(annos ) ) , 4 )
        new_image, new_annos, path = update_image_and_anno(
            img_paths , annos , idxs , OUTPUT_SIZE , SCALE_RANGE , filter_scale=FILTER_TINY_SCALE , )
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(3_2 )
        file_name = path.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
        file_root = F'''{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}'''
        cva.imwrite(F'''{file_root}.jpg''' , new_image , [cva.IMWRITE_JPEG_QUALITY, 8_5] )
        print(F'''Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}''' )
        annos_list = []
        for anno in new_annos:
            # Convert corner coordinates back to YOLO center/size format.
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = F'''{anno[0]} {x_center} {y_center} {width} {height}'''
            annos_list.append(obj )
        with open(F'''{file_root}.txt''' , '''w''' ) as outfile:
            outfile.write('''\n'''.join(line for line in annos_list ) )
def get_dataset(label_dir: str , img_dir: str ) ->tuple[list, list]:
    """Collect image paths and bounding boxes from a YOLO-format label folder.

    Args:
        label_dir: directory containing one ``.txt`` label file per image
            (lines: ``class x_center y_center width height``, all relative).
        img_dir: directory containing the matching ``<name>.jpg`` images.

    Returns:
        (img_paths, labels) where each label is a list of
        ``[class_id, xmin, ymin, xmax, ymax]`` corner boxes.
        Images without any box are skipped.
    """
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir , '''*.txt''' ) ):
        label_name = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
        with open(label_file ) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir , F'''{label_name}.jpg''' )
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip('''\n''' ).split(''' ''' )
            # center/size (relative) -> corner coordinates
            xmin = float(obj[1] ) - float(obj[3] ) / 2
            ymin = float(obj[2] ) - float(obj[4] ) / 2
            xmax = float(obj[1] ) + float(obj[3] ) / 2
            ymax = float(obj[2] ) + float(obj[4] ) / 2
            boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
        if not boxes:
            continue
        img_paths.append(img_path )
        labels.append(boxes )
    return img_paths, labels
def update_image_and_anno(all_img_list: list , all_annos: list , idxs: list[int] , output_size: tuple[int, int] , scale_range: tuple[float, float] , filter_scale: float = 0.0 , ) ->tuple[list, list, str]:
    """Stitch four images into one mosaic and remap their boxes.

    Args:
        all_img_list: all image paths.
        all_annos: corner-format boxes per image (parallel to all_img_list).
        idxs: four indices choosing the tiles (top-left, top-right,
            bottom-left, bottom-right).
        output_size: (height, width) of the mosaic.
        scale_range: range for the random split point between tiles.
        filter_scale: drop boxes whose width or height falls below this value.

    Returns:
        (mosaic_image, remapped_boxes, path_of_first_tile)
    """
    output_img = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uint8 )
    # Random split point shared by all four quadrants.
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1] )
    divid_point_y = int(scale_y * output_size[0] )
    new_anno = []
    path_list = []
    for i, index in enumerate(idxs ):
        path = all_img_list[index]
        path_list.append(path )
        img_annos = all_annos[index]
        img = cva.imread(path )
        if i == 0:  # top-left
            img = cva.resize(img , (divid_point_x, divid_point_y) )
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
        elif i == 1:  # top-right
            img = cva.resize(img , (output_size[1] - divid_point_x, divid_point_y) )
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
        elif i == 2:  # bottom-left
            img = cva.resize(img , (divid_point_x, output_size[0] - divid_point_y) )
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
        else:  # bottom-right
            img = cva.resize(
                img , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
    # Remove bounding box small than scale of filter
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]
    return output_img, new_anno, path_list[0]
def random_chars(number_char: int ) ->str:
    """Return a random string of lowercase letters and digits of the given length."""
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code ) for _ in range(number_char ) )
if __name__ == "__main__":
    # Script entry point: generate the configured number of mosaic images.
    main()
    print("DONE ✅")
| 161 | 0 |
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class snake_case__(ModelTesterMixin , UNetTesterMixin , unittest.TestCase ):
    """Common model tests for `AutoencoderKL` (VAE) on a small dummy config."""

    model_class = AutoencoderKL
    main_input_name = "sample"
    base_precision = 1e-2

    @property
    def dummy_input( self : int ):
        """A random (4, 3, 32, 32) image batch keyed by the model's input name."""
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes ).to(torch_device )
        return {"sample": image}

    @property
    def input_shape( self : int ):
        return (3, 32, 32)

    @property
    def output_shape( self : Optional[int] ):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common( self : int ):
        """Small two-level VAE config plus a matching dummy input batch."""
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature( self : Union[str, Any] ):
        pass  # covered by the mixin for other models; not applicable here

    def test_training( self : Dict ):
        pass  # covered by the mixin for other models; not applicable here

    @unittest.skipIf(torch_device == "mps" , "Gradient checkpointing skipped on MPS" )
    def test_gradient_checkpointing( self : List[Any] ):
        # enable deterministic behavior for gradient checkpointing
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict )
        model.to(torch_device )

        assert not model.is_gradient_checkpointing and model.training

        out = model(**inputs_dict ).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()

        labels = torch.randn_like(out )
        loss = (out - labels).mean()
        loss.backward()

        # re-instantiate the model now enabling gradient checkpointing
        model_a = self.model_class(**init_dict )
        # clone model
        model_a.load_state_dict(model.state_dict() )
        model_a.to(torch_device )
        model_a.enable_gradient_checkpointing()

        assert model_a.is_gradient_checkpointing and model_a.training

        out_a = model_a(**inputs_dict ).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_a.zero_grad()
        loss_a = (out_a - labels).mean()
        loss_a.backward()

        # compare the output and parameters gradients
        self.assertTrue((loss - loss_a).abs() < 1E-5 )
        named_params = dict(model.named_parameters() )
        named_params_a = dict(model_a.named_parameters() )
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) )

    def test_from_pretrained_hub( self : Union[str, Any] ):
        """Loading the dummy VAE from the Hub reports no missing keys and runs."""
        model, loading_info = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" , output_loading_info=True )
        self.assertIsNotNone(model )
        self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
        model.to(torch_device )
        image = model(**self.dummy_input )
        assert image is not None, "Make sure output is not None"

    def test_output_pretrained( self : Any ):
        """Seeded forward pass matches the recorded per-device output slices."""
        model = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" )
        model = model.to(torch_device )
        model.eval()

        # MPS currently lacks device generators, so fall back to the global seed.
        if torch_device == "mps":
            generator = torch.manual_seed(0 )
        else:
            generator = torch.Generator(device=torch_device ).manual_seed(0 )

        image = torch.randn(
            1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
        image = image.to(torch_device )
        with torch.no_grad():
            output = model(image , sample_posterior=True , generator=generator ).sample
        output_slice = output[0, -1, -3:, -3:].flatten().cpu()

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == "mps":
            expected_output_slice = torch.tensor(
                [
                    -4.0078E-01,
                    -3.8323E-04,
                    -1.2681E-01,
                    -1.1462E-01,
                    2.0095E-01,
                    1.0893E-01,
                    -8.8247E-02,
                    -3.0361E-01,
                    -9.8644E-03,
                ] )
        elif torch_device == "cpu":
            expected_output_slice = torch.tensor(
                [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026] )
        else:
            expected_output_slice = torch.tensor(
                [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485] )

        self.assertTrue(torch_all_close(output_slice , expected_output_slice , rtol=1E-2 ) )
@slow
class snake_case__(unittest.TestCase ):
"""simple docstring"""
def snake_case ( self : Dict , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : str ):
return f"""gaussian_noise_s={seed}_shape={"_".join([str(__lowercase ) for s in shape] )}.npy"""
def snake_case ( self : Optional[Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[Any]=0 , SCREAMING_SNAKE_CASE : List[str]=(4, 3, 512, 512) , SCREAMING_SNAKE_CASE : List[Any]=False ):
lowercase__ : int = torch.floataa if fpaa else torch.floataa
lowercase__ : Any = torch.from_numpy(load_hf_numpy(self.get_file_format(__lowercase , __lowercase ) ) ).to(__lowercase ).to(__lowercase )
return image
def snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE : Any="CompVis/stable-diffusion-v1-4" , SCREAMING_SNAKE_CASE : List[Any]=False ):
lowercase__ : List[Any] = "fp16" if fpaa else None
lowercase__ : Any = torch.floataa if fpaa else torch.floataa
lowercase__ : List[Any] = AutoencoderKL.from_pretrained(
__lowercase , subfolder="vae" , torch_dtype=__lowercase , revision=__lowercase , )
model.to(__lowercase ).eval()
return model
def snake_case ( self : str , SCREAMING_SNAKE_CASE : Optional[int]=0 ):
if torch_device == "mps":
return torch.manual_seed(__lowercase )
return torch.Generator(device=__lowercase ).manual_seed(__lowercase )
@parameterized.expand(
[
# fmt: off
[33, [-0.1_603, 0.9_878, -0.0_495, -0.0_790, -0.2_709, 0.8_375, -0.2_060, -0.0_824], [-0.2_395, 0.0_098, 0.0_102, -0.0_709, -0.2_840, -0.0_274, -0.0_718, -0.1_824]],
[47, [-0.2_376, 0.1_168, 0.1_332, -0.4_840, -0.2_508, -0.0_791, -0.0_493, -0.4_089], [0.0_350, 0.0_847, 0.0_467, 0.0_344, -0.0_842, -0.0_547, -0.0_633, -0.1_131]],
# fmt: on
] )
def snake_case ( self : int , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : List[Any] ):
lowercase__ : str = self.get_sd_vae_model()
lowercase__ : Dict = self.get_sd_image(__lowercase )
lowercase__ : List[str] = self.get_generator(__lowercase )
with torch.no_grad():
lowercase__ : str = model(__lowercase , generator=__lowercase , sample_posterior=__lowercase ).sample
assert sample.shape == image.shape
lowercase__ : Optional[int] = sample[-1, -2:, -2:, :2].flatten().float().cpu()
lowercase__ : Optional[Any] = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice )
assert torch_all_close(__lowercase , __lowercase , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.0_513, 0.0_289, 1.3_799, 0.2_166, -0.2_573, -0.0_871, 0.5_103, -0.0_999]],
[47, [-0.4_128, -0.1_320, -0.3_704, 0.1_965, -0.4_116, -0.2_332, -0.3_340, 0.2_247]],
# fmt: on
] )
@require_torch_gpu
def snake_case ( self : int , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Tuple ):
lowercase__ : Dict = self.get_sd_vae_model(fpaa=__lowercase )
lowercase__ : List[Any] = self.get_sd_image(__lowercase , fpaa=__lowercase )
lowercase__ : Optional[int] = self.get_generator(__lowercase )
with torch.no_grad():
lowercase__ : Dict = model(__lowercase , generator=__lowercase , sample_posterior=__lowercase ).sample
assert sample.shape == image.shape
lowercase__ : List[Any] = sample[-1, -2:, :2, -2:].flatten().float().cpu()
lowercase__ : int = torch.tensor(__lowercase )
assert torch_all_close(__lowercase , __lowercase , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.1_609, 0.9_866, -0.0_487, -0.0_777, -0.2_716, 0.8_368, -0.2_055, -0.0_814], [-0.2_395, 0.0_098, 0.0_102, -0.0_709, -0.2_840, -0.0_274, -0.0_718, -0.1_824]],
[47, [-0.2_377, 0.1_147, 0.1_333, -0.4_841, -0.2_506, -0.0_805, -0.0_491, -0.4_085], [0.0_350, 0.0_847, 0.0_467, 0.0_344, -0.0_842, -0.0_547, -0.0_633, -0.1_131]],
# fmt: on
] )
def snake_case ( self : Any , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : List[Any] ):
lowercase__ : Optional[int] = self.get_sd_vae_model()
lowercase__ : Tuple = self.get_sd_image(__lowercase )
with torch.no_grad():
lowercase__ : Dict = model(__lowercase ).sample
assert sample.shape == image.shape
lowercase__ : Tuple = sample[-1, -2:, -2:, :2].flatten().float().cpu()
lowercase__ : List[Any] = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice )
assert torch_all_close(__lowercase , __lowercase , atol=3E-3 )
@parameterized.expand(
    [
        # fmt: off
        [13, [-0.2_051, -0.1_803, -0.2_311, -0.2_114, -0.3_292, -0.3_574, -0.2_953, -0.3_323]],
        [37, [-0.2_632, -0.2_625, -0.2_199, -0.2_741, -0.4_539, -0.4_990, -0.3_720, -0.4_925]],
        # fmt: on
    ] )
@require_torch_gpu
def snake_case(self, seed, expected_slice):
    """Full-precision VAE decode: check a decoded-sample slice against golden values.

    Fix: duplicate parameter names (SyntaxError) and undefined `__lowercase`
    reads restored to the seed / expected-slice parameters.
    """
    model = self.get_sd_vae_model()
    latents = self.get_sd_image(seed, shape=(3, 4, 64, 64))

    with torch.no_grad():
        sample = model.decode(latents).sample

    assert list(sample.shape) == [3, 3, 512, 512]

    output_slice = sample[-1, -2:, :2, -2:].flatten().cpu()
    expected_output_slice = torch.tensor(expected_slice)

    assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
@parameterized.expand(
    [
        # fmt: off
        [27, [-0.0_369, 0.0_207, -0.0_776, -0.0_682, -0.1_747, -0.1_930, -0.1_465, -0.2_039]],
        [16, [-0.1_628, -0.2_134, -0.2_747, -0.2_642, -0.3_774, -0.4_404, -0.3_687, -0.4_277]],
        # fmt: on
    ] )
@require_torch_gpu
def snake_case(self, seed, expected_slice):
    """Half-precision VAE decode: check a decoded-sample slice against golden values.

    Fix: duplicate parameter names (SyntaxError); `fpaa=__lowercase` restored to
    `fpaa=True` (the helper's half-precision flag).
    """
    model = self.get_sd_vae_model(fpaa=True)
    latents = self.get_sd_image(seed, shape=(3, 4, 64, 64), fpaa=True)

    with torch.no_grad():
        sample = model.decode(latents).sample

    assert list(sample.shape) == [3, 3, 512, 512]

    # Cast back to float32 before comparing against the float golden slice.
    output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
    expected_output_slice = torch.tensor(expected_slice)

    assert torch_all_close(output_slice, expected_output_slice, atol=5e-3)
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
def snake_case(self, seed):
    """fp16 decode must match itself with xformers attention enabled.

    Fix: duplicate parameter names and undefined `__lowercase` reads restored;
    the two decode results are compared against each other.
    """
    model = self.get_sd_vae_model(fpaa=True)
    latents = self.get_sd_image(seed, shape=(3, 4, 64, 64), fpaa=True)

    with torch.no_grad():
        sample = model.decode(latents).sample

    model.enable_xformers_memory_efficient_attention()
    with torch.no_grad():
        sample_xformers = model.decode(latents).sample

    assert list(sample.shape) == [3, 3, 512, 512]
    # Loose tolerance: fp16 + a different attention kernel.
    assert torch_all_close(sample, sample_xformers, atol=1e-1)
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
def snake_case(self, seed):
    """Full-precision decode must match itself with xformers attention enabled.

    Fix: duplicate parameter names and undefined `__lowercase` reads restored.
    """
    model = self.get_sd_vae_model()
    latents = self.get_sd_image(seed, shape=(3, 4, 64, 64))

    with torch.no_grad():
        sample = model.decode(latents).sample

    model.enable_xformers_memory_efficient_attention()
    with torch.no_grad():
        sample_xformers = model.decode(latents).sample

    assert list(sample.shape) == [3, 3, 512, 512]
    assert torch_all_close(sample, sample_xformers, atol=1e-2)
@parameterized.expand(
    [
        # fmt: off
        [33, [-0.3_001, 0.0_918, -2.6_984, -3.9_720, -3.2_099, -5.0_353, 1.7_338, -0.2_065, 3.4_267]],
        [47, [-1.5_030, -4.3_871, -6.0_355, -9.1_157, -1.6_661, -2.7_853, 2.1_607, -5.0_823, 2.5_633]],
        # fmt: on
    ] )
def snake_case(self, seed, expected_slice):
    """VAE encode: sample from the posterior and check a slice against golden values.

    Fix: duplicate parameter names (SyntaxError) and undefined `__lowercase`
    reads restored to the seed / expected-slice parameters.
    """
    model = self.get_sd_vae_model()
    image = self.get_sd_image(seed)
    generator = self.get_generator(seed)

    with torch.no_grad():
        dist = model.encode(image).latent_dist
        sample = dist.sample(generator=generator)

    # Latents are 4 channels at 1/8 the spatial resolution of the input.
    assert list(sample.shape) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]

    output_slice = sample[0, -1, -3:, -3:].flatten().cpu()
    expected_output_slice = torch.tensor(expected_slice)

    tolerance = 3e-3 if torch_device != "mps" else 1e-2
    assert torch_all_close(output_slice, expected_output_slice, atol=tolerance)
| 715 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''huggingface/informer-tourism-monthly''': (
'''https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'''
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class snake_case__(PretrainedConfig):
    """
    Configuration class for an Informer time-series transformer model.

    Fixes applied: the two class attributes were both bound to the single name
    `lowercase_` (the second silently clobbered the first) — restored to the
    `model_type` / `attribute_map` names `PretrainedConfig` machinery expects;
    every `__init__` parameter shared one name (a SyntaxError) while the body
    read the original parameter names — the parameter list is restored to
    match those reads; the base class `_UpperCamelCase` was undefined, while
    `PretrainedConfig` is imported at the top of this module.
    """

    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        # Default the context window to the prediction window when unset.
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`" )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
            self.embedding_dimension = embedding_dimension
        else:
            # Heuristic default: half the category count (+1), capped at 50.
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]

        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        """Number of extra per-timestep features fed alongside the lagged values."""
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
| 81 | 0 |
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
_lowerCamelCase = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE__():
    """Return True when SageMaker model parallelism is configured and installed.

    Fixes applied: the original bound every local to one name and then read
    the undefined name `UpperCamelCase__` — locals restored; the `.get`
    default was likewise undefined and is restored to `False`.
    """
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}")
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False

    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get("sagemaker_mpi_enabled", False):
            return False
    except json.JSONDecodeError:
        return False

    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("smdistributed") is not None


# Backward-compatible canonical name: the module-level guard below calls the
# helper as `is_sagemaker_model_parallel_available`.
is_sagemaker_model_parallel_available = SCREAMING_SNAKE_CASE__
# Initialize SageMaker model parallelism at import time, mirroring the
# upstream SageMaker trainer module.
# NOTE(review): `is_sagemaker_model_parallel_available` is not defined under
# that name in this file as written (the helper above is named
# `SCREAMING_SNAKE_CASE__`), so this guard raises NameError — confirm the
# helper is exposed under this name.
if is_sagemaker_model_parallel_available():
    import smdistributed.modelparallel.torch as smp

    smp.init()
@dataclass
class UpperCamelCase_(TrainingArguments):
    """
    Deprecated `TrainingArguments` variant for SageMaker model/data parallelism.

    Fixes applied: every method was named `_snake_case` (each definition
    clobbered the previous one) — real override names restored so the
    `TrainingArguments` machinery actually calls them; undefined `__A` reads
    restored (`FutureWarning`, local rank, device); the undefined base class
    `UpperCamelCase__` replaced with the imported `TrainingArguments`; dataset
    separator junk fused onto the last line removed.
    """

    # Raw mp-specific argument string forwarded by the SageMaker launcher.
    mp_parameters: str = field(
        default="",
        metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"},
    )

    def __post_init__(self):
        super().__post_init__()
        warnings.warn(
            "`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
            "`TrainingArguments` instead.",
            FutureWarning,
        )

    @cached_property
    def _setup_devices(self) -> "torch.device":
        """Select the torch device, honoring SageMaker MP/DP, DDP and --no_cuda."""
        # `_lowerCamelCase` is this module's logger.
        _lowerCamelCase.info("PyTorch: setting up devices")
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            _lowerCamelCase.warning(
                "torch.distributed process group is initialized, but local_rank == -1. "
                "In order to use Torch DDP, launch your script with `python -m torch.distributed.launch" )
        if self.no_cuda:
            device = torch.device("cpu")
            self._n_gpu = 0
        elif SCREAMING_SNAKE_CASE__():  # SageMaker model parallelism is available
            local_rank = smp.local_rank()
            device = torch.device("cuda", local_rank)
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401

            torch.distributed.init_process_group(backend="smddp", timeout=self.ddp_timeout_delta)
            self.local_rank = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend="nccl", timeout=self.ddp_timeout_delta)
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1

        if device.type == "cuda":
            torch.cuda.set_device(device)
        return device

    @property
    def world_size(self) -> int:
        """Total number of processes; under model parallelism this is the DP size."""
        if SCREAMING_SNAKE_CASE__():
            return smp.dp_size()
        return super().world_size

    @property
    def place_model_on_device(self) -> bool:
        # smp handles device placement itself.
        return not SCREAMING_SNAKE_CASE__()

    @property
    def _no_sync_in_gradient_accumulation(self) -> bool:
        return False
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class a(BaseOutput):
    """
    Output of a Karras-Ve scheduler step.

    Fixes applied: the three fields were all named `lowerCAmelCase` (later
    annotations clobbered earlier ones, so keyword construction failed) —
    restored to the names the scheduler below passes as keywords
    (`prev_sample=`, `derivative=`, `pred_original_sample=`); the undefined
    base `_A` replaced with the imported `BaseOutput`.
    """

    # Computed sample (x_{t-1}) for the previous timestep.
    prev_sample: torch.FloatTensor
    # Derivative of the predicted original sample used by the ODE step.
    derivative: torch.FloatTensor
    # Predicted denoised sample (x_0), when available.
    pred_original_sample: Optional[torch.FloatTensor] = None


# Canonical alias: the scheduler below instantiates this class via the name
# `KarrasVeOutput`, and the second `class a` definition later shadows `a`.
KarrasVeOutput = a
class a(SchedulerMixin, ConfigMixin):
    """
    Stochastic sampler from Karras et al. (2022), "Elucidating the Design
    Space of Diffusion-Based Generative Models" (Algorithm 2).

    Fixes applied: every method shared one name (each definition clobbered the
    previous one) and every `__init__`/`step` parameter shared one name (a
    SyntaxError) — names restored to match the bodies' surviving reads
    (`self.config.sigma_min`, `sample_hat`, `sigma_prev`, ...); the duplicate
    base list `(_A, _A)` replaced with the imported mixins; `torch.floataa`
    restored to `torch.float32`.
    """

    order = 2

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.num_inference_steps = None
        self.timesteps = None
        self.schedule = None  # sigma(t_i)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None):
        """No input scaling is required for this scheduler."""
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        """Precompute the (descending) timestep indices and the sigma schedule."""
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps).to(device)
        # Geometric interpolation between sigma_max and sigma_min.
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device)

    def add_noise_to_input(
        self, sample: torch.FloatTensor, sigma: float, generator: Optional[torch.Generator] = None
    ):
        """Increase noise to sigma_hat when sigma lies in [s_min, s_max]; returns (sample_hat, sigma_hat)."""
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat

    def step(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        return_dict: bool = True,
    ):
        """First-order (Euler) step from sigma_hat down to sigma_prev."""
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample )

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        sample_prev: torch.FloatTensor,
        derivative: torch.FloatTensor,
        return_dict: bool = True,
    ):
        """Second-order (Heun) correction using the derivative at sample_prev."""
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample )

    def add_noise(self, original_samples, noise, timesteps):
        raise NotImplementedError()
| 144 | 0 |
def UpperCAmelCase_(_A):
    """Count the distinct ways to climb `_A` stairs taking 1 or 2 steps at a time.

    Fixes applied: `isinstance(_A, _A)` (a TypeError — the second argument must
    be a type) restored to `isinstance(_A, int)`, and the assertion message
    read the undefined name `number_of_steps` instead of the parameter.

    Raises:
        AssertionError: if `_A` is not a positive integer.
    """
    assert (
        isinstance(_A, int) and _A > 0
    ), f"number_of_steps needs to be positive integer, your input {_A}"
    if _A == 1:
        return 1
    # Fibonacci-style recurrence: ways(n) = ways(n-1) + ways(n-2).
    current, previous = 1, 1
    for _ in range(_A - 1):
        current, previous = current + previous, current
    return current
# Run the module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 472 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy import structure for the RoBERTa model family: each optional backend
# (tokenizers / torch / tf / flax) contributes its submodules only when it is
# installed.
# Fixes applied: every conditional branch rebound the single name
# `_SCREAMING_SNAKE_CASE` (each assignment clobbered the previous list and the
# dict), while the final `_LazyModule(...)` call read the undefined name
# `_import_structure` — restored to the standard pattern of adding keys to one
# `_import_structure` dict and installing the lazy module in `sys.modules`.
_import_structure = {
    "configuration_roberta": ["ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "RobertaConfig", "RobertaOnnxConfig"],
    "tokenization_roberta": ["RobertaTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_roberta_fast"] = ["RobertaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roberta"] = [
        "ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RobertaForCausalLM",
        "RobertaForMaskedLM",
        "RobertaForMultipleChoice",
        "RobertaForQuestionAnswering",
        "RobertaForSequenceClassification",
        "RobertaForTokenClassification",
        "RobertaModel",
        "RobertaPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_roberta"] = [
        "TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRobertaForCausalLM",
        "TFRobertaForMaskedLM",
        "TFRobertaForMultipleChoice",
        "TFRobertaForQuestionAnswering",
        "TFRobertaForSequenceClassification",
        "TFRobertaForTokenClassification",
        "TFRobertaMainLayer",
        "TFRobertaModel",
        "TFRobertaPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_roberta"] = [
        "FlaxRobertaForCausalLM",
        "FlaxRobertaForMaskedLM",
        "FlaxRobertaForMultipleChoice",
        "FlaxRobertaForQuestionAnswering",
        "FlaxRobertaForSequenceClassification",
        "FlaxRobertaForTokenClassification",
        "FlaxRobertaModel",
        "FlaxRobertaPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Eager imports for static type checkers only.
    from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
    from .tokenization_roberta import RobertaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_roberta_fast import RobertaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roberta import (
            ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            RobertaForCausalLM,
            RobertaForMaskedLM,
            RobertaForMultipleChoice,
            RobertaForQuestionAnswering,
            RobertaForSequenceClassification,
            RobertaForTokenClassification,
            RobertaModel,
            RobertaPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roberta import (
            TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRobertaForCausalLM,
            TFRobertaForMaskedLM,
            TFRobertaForMultipleChoice,
            TFRobertaForQuestionAnswering,
            TFRobertaForSequenceClassification,
            TFRobertaForTokenClassification,
            TFRobertaMainLayer,
            TFRobertaModel,
            TFRobertaPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roberta import (
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaModel,
            FlaxRobertaPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 472 | 1 |
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
UpperCamelCase__: List[Any] = re.compile(r"\s+")
def snake_case_(example):
    """Return the md5 hash of the example's content with all whitespace removed.

    Fixes applied: `hashlib.mda` (no such function) restored to `hashlib.md5`;
    the body read the undefined names `SCREAMING_SNAKE_CASE__` / `example`
    instead of the parameter.
    """
    # Inline the whitespace pattern so the hash does not depend on a
    # module-level name that is rebound elsewhere in this file.
    content_no_ws = re.sub(r"\s+", "", example["content"])
    return {"hash": hashlib.md5(content_no_ws.encode("utf-8")).hexdigest()}


get_hash = snake_case_  # canonical name used by `preprocess` below
def snake_case_(example):
    """Return the mean and maximum line length of the example's content.

    Fix: the comprehension measured the undefined name `SCREAMING_SNAKE_CASE__`
    instead of each `line`, and read `example` instead of the parameter.
    """
    line_lengths = [len(line) for line in example["content"].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}


line_stats = snake_case_  # canonical name used by `preprocess` below
def snake_case_(example):
    """Return the fraction of alphanumeric characters in the example's content.

    Fix: the body read the undefined name `example` instead of the parameter.
    """
    alpha_frac = np.mean([c.isalnum() for c in example["content"]])
    return {"alpha_frac": alpha_frac}


alpha_stats = snake_case_  # canonical name used by `preprocess` below
def snake_case_(example, uniques):
    """Return True (and consume the hash) the first time a hash is seen.

    Fix: both parameters shared the name `_lowerCAmelCase` (a SyntaxError) —
    restored to the names the body reads. Note this mutates `uniques`.
    """
    if example["hash"] in uniques:
        uniques.remove(example["hash"])
        return True
    else:
        return False


check_uniques = snake_case_  # canonical name used by `filter` below
def snake_case_(example, scan_width=5):
    """Flag examples whose first `scan_width` lines contain an auto-generation marker.

    Fix: both parameters shared one name (a SyntaxError) — restored to the
    names the body reads.
    """
    keywords = ["auto-generated", "autogenerated", "automatically generated"]
    lines = example["content"].splitlines()
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    return {"autogenerated": False}


is_autogenerated = snake_case_  # canonical name used by `preprocess` below
def snake_case_(example, scan_width=5, coeff=0.05):
    """Flag examples that look like config files or tests.

    Two heuristics: a keyword in the first `scan_width` lines, or more than
    `coeff * nlines` occurrences of "config"/"test" in the whole file.
    Fix: all three parameters shared one name (a SyntaxError) — restored to
    the names the body reads.
    """
    keywords = ["unit tests", "test file", "configuration file"]
    lines = example["content"].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example["content"].count("\n")
    threshold = int(coeff * nlines)
    for line in lines:
        count_config += line.lower().count("config")
        count_test += line.lower().count("test")
    if count_config > threshold or count_test > threshold:
        return {"config_or_test": True}
    return {"config_or_test": False}


is_config_or_test = snake_case_  # canonical name used by `preprocess` below
def snake_case_(example):
    """Flag examples containing none of the common code keywords.

    Fix: the body read the undefined name `example` instead of the parameter.
    """
    keywords = ["def ", "class ", "for ", "while "]
    for line in example["content"].splitlines():
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}


has_no_keywords = snake_case_  # canonical name used by `preprocess` below
def snake_case_(example, minimum=4):
    """Flag examples with at most `minimum` '=' characters (few assignments).

    Fix: both parameters shared one name (a SyntaxError) — restored to the
    names the body reads.
    """
    counter = 0
    for line in example["content"].splitlines():
        counter += line.lower().count("=")
    if counter > minimum:
        return {"has_few_assignments": False}
    return {"has_few_assignments": True}


has_few_assignments = snake_case_  # canonical name used by `preprocess` below
def snake_case_(example):
    """Return the character-per-token ratio of the example's content.

    Fixes applied: `truncation=SCREAMING_SNAKE_CASE__` (undefined) restored to
    `truncation=True`; the body read `example` instead of the parameter.
    NOTE(review): `tokenizer` must be the module-level tokenizer loaded in the
    script section below — its binding is degraded in this file; confirm it is
    exposed under this name before running.
    """
    input_ids = tokenizer(example["content"], truncation=True)["input_ids"]
    ratio = len(example["content"]) / len(input_ids)
    return {"ratio": ratio}


char_token_ratio = snake_case_  # canonical name used by `preprocess` below
def snake_case_(example):
    """Run all per-example statistics/heuristics and merge their result dicts.

    Fix: every helper was called with the undefined argument
    `SCREAMING_SNAKE_CASE__` instead of the example parameter.
    """
    results = {}
    results.update(get_hash(example))
    results.update(line_stats(example))
    results.update(alpha_stats(example))
    results.update(char_token_ratio(example))
    results.update(is_autogenerated(example))
    results.update(is_config_or_test(example))
    results.update(has_no_keywords(example))
    results.update(has_few_assignments(example))
    return results


preprocess = snake_case_  # canonical name used by `ds.map(...)` below
def snake_case_(example, uniques, args):
    """Keep an example only if it is unique and passes all quality heuristics.

    Fix: all three parameters shared one name (a SyntaxError) — restored to
    the names the body reads.
    """
    if not check_uniques(example, uniques):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    # Config/test-looking and keyword-free files are dropped probabilistically.
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True


filter = snake_case_  # noqa: A001 - canonical name used by `ds.filter(...)` below
def snake_case_(file_path):
    """Gzip-compress `file_path` to `file_path + ".gz"` and delete the original.

    Fix: the body read the undefined name `SCREAMING_SNAKE_CASE__` instead of
    the parameter.
    """
    with open(file_path, "rb") as f_in:
        with gzip.open(str(file_path) + ".gz", "wb", compresslevel=6) as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.unlink(file_path)


compress_file = snake_case_  # canonical name used by the save loop below
# ---------------------------------------------------------------------------
# Script entry: parse arguments, load and preprocess the dataset, deduplicate,
# filter, and write gzip-compressed JSON shards.
# Fixes applied: every binding targeted the single degraded name
# `UpperCamelCase__` while subsequent reads used the original variable names
# (`parser`, `args`, `tokenizer`, `ds`, `uniques`, `frac`, `ds_filter`,
# `duplicate_clusters`, `output_dir`, `data_dir`, `file_path`, `end_index`,
# `t_start`), so the script raised NameError on its second statement —
# variable names restored to match the reads.
# ---------------------------------------------------------------------------
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Time to load dataset: {time.time()-t_start:.2f}")

# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f"Time to preprocess dataset: {time.time()-t_start:.2f}")

# Deduplicate hashes
uniques = set(ds.unique("hash"))
frac = len(uniques) / len(ds)
print(f"Fraction of duplicates: {1-frac:.2%}")

# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(f"Time to filter dataset: {time.time()-t_start:.2f}")
print(f"Size of filtered dataset: {len(ds_filter)}")

# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(f"Time to deduplicate dataset: {time.time()-t_start:.2f}")
    print(f"Size of deduplicate dataset: {len(ds_filter)}")

# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)

# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
    with open(output_dir / "duplicate_clusters.json", "w") as f:
        json.dump(duplicate_clusters, f)

data_dir = output_dir / "data"
data_dir.mkdir(exist_ok=True)

t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f"file-{file_number+1:012}.json")
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(f"Time to save dataset: {time.time()-t_start:.2f}")
| 127 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
# Lazy import structure for MobileViT: optional backends (vision / torch / tf)
# contribute their submodules only when installed.
# Fixes applied: every conditional branch rebound the single name
# `_lowerCamelCase` (each assignment clobbered the previous list and the
# dict), while the final `_LazyModule(...)` call read the undefined name
# `_import_structure` — restored to the standard pattern of adding keys to one
# `_import_structure` dict and installing the lazy module in `sys.modules`.
_import_structure = {
    "configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_mobilevit"] = ["MobileViTFeatureExtractor"]
    _import_structure["image_processing_mobilevit"] = ["MobileViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilevit"] = [
        "MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileViTForImageClassification",
        "MobileViTForSemanticSegmentation",
        "MobileViTModel",
        "MobileViTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
        "TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFMobileViTForImageClassification",
        "TFMobileViTForSemanticSegmentation",
        "TFMobileViTModel",
        "TFMobileViTPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Eager imports for static type checkers only.
    from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilevit import MobileViTFeatureExtractor
        from .image_processing_mobilevit import MobileViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilevit import (
            MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileViTForImageClassification,
            MobileViTForSemanticSegmentation,
            MobileViTModel,
            MobileViTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mobilevit import (
            TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFMobileViTForImageClassification,
            TFMobileViTForSemanticSegmentation,
            TFMobileViTModel,
            TFMobileViTPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 663 | 0 |
"""simple docstring"""
class _a:
    """Max Fenwick (binary indexed) tree: point update and range-max query,
    both in O(log^2 n). Values are assumed non-negative (query starts at 0).

    Fixes applied: `update` and `query` each declared both parameters under
    one name (a SyntaxError); the bodies already call `self.get_prev` /
    `self.get_next`, but every method was named `__lowercase`, so those calls
    could never resolve — the referenced names are restored; the degraded
    three-name `max(...)` in `update` is restored to the range re-computation
    `max(value, self.query(...))` so that decreasing a value also works.
    """

    def __init__(self, size: int) -> None:
        self.size = size
        # arr holds the raw values; tree[i] caches max(arr[get_prev(i)+1 .. i]).
        self.arr = [0] * size
        self.tree = [0] * size

    @staticmethod
    def get_next(index: int) -> int:
        """Next node whose covered interval contains `index`."""
        return index | (index + 1)

    @staticmethod
    def get_prev(index: int) -> int:
        """One before the left border of the interval covered by `index`."""
        return (index & (index + 1)) - 1

    def update(self, index: int, value: int) -> None:
        """Set arr[index] = value and refresh every tree node covering it."""
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                # Node covers only this index.
                self.tree[index] = value
            else:
                # Recompute the node max over [current_left_border, index].
                self.tree[index] = max(value, self.query(current_left_border, index))
            index = self.get_next(index)

    def query(self, left: int, right: int) -> int:
        """Return max(arr[left:right]) — `right` is exclusive."""
        right -= 1  # Because right is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                # Whole node interval fits inside the query range.
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result
# Run the module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 720 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
UpperCamelCase = None
# NOTE(review): every module constant below is bound to the single name
# ``UpperCamelCase``; each assignment overwrites the previous one, so only the
# last (the SentencePiece underline token) survives. Originally these were
# presumably distinct names (logger, VOCAB_FILES_NAMES,
# PRETRAINED_VOCAB_FILES_MAP, PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES,
# SPIECE_UNDERLINE) — TODO restore before the class below can work.
UpperCamelCase = logging.get_logger(__name__)
# Expected on-disk filenames for the slow and fast tokenizer assets.
UpperCamelCase = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
# Checkpoint name -> hosted vocabulary / tokenizer file URLs.
UpperCamelCase = {
    """vocab_file""": {
        """moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""",
        """moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""",
        """moussaKam/barthez-orangesum-title""": (
            """https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"""
        ),
    },
    """tokenizer_file""": {
        """moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json""",
        """moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json""",
        """moussaKam/barthez-orangesum-title""": (
            """https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"""
        ),
    },
}
# Maximum input lengths (positional embedding sizes) per checkpoint.
UpperCamelCase = {
    """moussaKam/mbarthez""": 10_24,
    """moussaKam/barthez""": 10_24,
    """moussaKam/barthez-orangesum-title""": 10_24,
}
# SentencePiece word-boundary marker.
UpperCamelCase = """▁"""
class _a ( lowercase_ ):
    """Fast (HF tokenizers-backed) Barthez tokenizer wrapper.

    NOTE(review): obfuscation damage — the five class attributes all share the
    name ``UpperCamelCase__`` (later assignments overwrite earlier ones); the
    constants they reference (``VOCAB_FILES_NAMES`` etc.) are not defined in
    this module under those names; ``__init__`` declares nine parameters all
    called ``UpperCAmelCase_`` (duplicate argument names are a SyntaxError);
    and locals named ``lowercase__`` stand where ``self.vocab_file`` and
    ``self.can_save_slow_tokenizer`` were presumably set. TODO restore.
    """
    UpperCamelCase__ = VOCAB_FILES_NAMES
    UpperCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
    UpperCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCamelCase__ = ["""input_ids""", """attention_mask"""]
    UpperCamelCase__ = BarthezTokenizer
    def __init__( self , UpperCAmelCase_=None , UpperCAmelCase_=None , UpperCAmelCase_="<s>" , UpperCAmelCase_="</s>" , UpperCAmelCase_="</s>" , UpperCAmelCase_="<s>" , UpperCAmelCase_="<unk>" , UpperCAmelCase_="<pad>" , UpperCAmelCase_="<mask>" , **UpperCAmelCase_ , ) -> Optional[int]:
        """Build the fast tokenizer; the mask token is lstrip'ed like in BART."""
        lowercase__: Union[str, Any] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else mask_token
        super().__init__(
            UpperCAmelCase_ , tokenizer_file=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , **UpperCAmelCase_ , )
        lowercase__: Dict = vocab_file
        lowercase__: Tuple = False if not self.vocab_file else True
    def __lowercase ( self , UpperCAmelCase_ , UpperCAmelCase_ = None) -> List[int]:
        """Add special tokens: <s> A </s> for one sequence, <s> A </s></s> B </s> for a pair."""
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        lowercase__: List[str] = [self.cls_token_id]
        lowercase__: List[str] = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep
    def __lowercase ( self , UpperCAmelCase_ , UpperCAmelCase_ = None) -> List[int]:
        """Return an all-zero token-type mask matching the special-token layout."""
        lowercase__: List[Any] = [self.sep_token_id]
        lowercase__: int = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
    def __lowercase ( self , UpperCAmelCase_ , UpperCAmelCase_ = None) -> Tuple[str]:
        """Copy the SentencePiece model into save_directory for the slow tokenizer."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer.")
        if not os.path.isdir(UpperCAmelCase_):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""")
            return
        lowercase__: List[Any] = os.path.join(
            UpperCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        # Avoid copying a file onto itself.
        if os.path.abspath(self.vocab_file) != os.path.abspath(UpperCAmelCase_):
            copyfile(self.vocab_file , UpperCAmelCase_)
        return (out_vocab_file,)
| 120 | 0 |
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( __UpperCAmelCase ) -> bool:
    """Return True iff the given integer is a perfect cube.

    Bug fix: the previous body compared ``(n ** (1/3)) ** 3 == n`` with pure
    float arithmetic, which fails for most true cubes (e.g. 27 ** (1/3) is
    3.0000000000000004, so 27 reported False) and for negative input produces
    a complex root. Here the float root is rounded to the nearest integer and
    verified with exact integer arithmetic; neighbouring candidates are probed
    to guard against float error on large values. Negative perfect cubes
    (e.g. -27) now correctly return True.
    """
    magnitude = abs(__UpperCAmelCase)  # n is a cube iff |n| is
    root = round(magnitude ** (1 / 3))
    return any(candidate ** 3 == magnitude for candidate in (root - 1, root, root + 1))
if __name__ == "__main__":
    # NOTE(review): ``perfect_cube`` is not defined in this module (the
    # function above is named ``SCREAMING_SNAKE_CASE``), so running this
    # script raises NameError — TODO restore the original name or these calls.
    print(perfect_cube(2_7))
    print(perfect_cube(4))
| 159 | """simple docstring"""
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 159 | 1 |
"""simple docstring"""
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
# Checkpoint name -> hosted config URL (original name was presumably
# ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP — TODO confirm).
UpperCamelCase : List[str] = {
    "susnato/ernie-m-base_pytorch": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json",
    "susnato/ernie-m-large_pytorch": "https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json",
}
class lowerCamelCase__ ( UpperCAmelCase_ ):
    """ERNIE-M model configuration (``model_type = "ernie_m"``).

    NOTE(review): obfuscation damage — both class attributes share the name
    ``lowerCAmelCase`` (the second overwrites the first); ``__init__`` declares
    many parameters all called ``_lowercase``, and duplicate argument names are
    a SyntaxError; the body then assigns every attribute to the single local
    ``A`` while reading names (``vocab_size``, ``hidden_size``, ...) that are
    never bound. TODO restore the original parameter/attribute names.
    """
    lowerCAmelCase = """ernie_m"""
    # Maps legacy config keys to their current names.
    lowerCAmelCase = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
    def __init__( self : Any , _lowercase : int = 250_002 , _lowercase : int = 768 , _lowercase : int = 12 , _lowercase : int = 12 , _lowercase : int = 3_072 , _lowercase : str = "gelu" , _lowercase : float = 0.1 , _lowercase : float = 0.1 , _lowercase : int = 514 , _lowercase : float = 0.0_2 , _lowercase : int = 1 , _lowercase : float = 1e-05 , _lowercase : int=None , _lowercase : List[Any]=False , _lowercase : List[Any]=0.0 , **_lowercase : Optional[Any] , ):
        super().__init__(pad_token_id=_lowercase , **_lowercase )
        A = vocab_size
        A = hidden_size
        A = num_hidden_layers
        A = num_attention_heads
        A = intermediate_size
        A = hidden_act
        A = hidden_dropout_prob
        A = attention_probs_dropout_prob
        A = max_position_embeddings
        A = initializer_range
        A = layer_norm_eps
        A = classifier_dropout
        A = is_decoder
        A = act_dropout
| 91 |
"""simple docstring"""
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch('socket.socket' )
@patch('builtins.open' )
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ ) -> Dict:
    """Unit test for ``send_file``: patches socket/open and checks call counts.

    NOTE(review): obfuscation damage — the two patched mocks are both named
    ``UpperCamelCase__`` (duplicate parameter names are a SyntaxError), and the
    mock wiring below assigns everything to the single local ``A``, leaving
    ``conn``, ``sock`` and ``file`` unbound. TODO restore: presumably
    ``conn = Mock()``; ``sock.return_value.accept.return_value = conn, Mock()``;
    ``conn.recv.side_effect`` driven by the iterator.
    """
    A = Mock()
    A = conn, Mock()
    A = iter([1, None] )
    A = lambda UpperCamelCase__ : next(UpperCamelCase__ )
    # ===== invoke =====
    send_file(filename='mytext.txt' , testing=UpperCamelCase__ )
    # ===== ensurance =====
    sock.assert_called_once()
    sock.return_value.bind.assert_called_once()
    sock.return_value.listen.assert_called_once()
    sock.return_value.accept.assert_called_once()
    conn.recv.assert_called_once()
    file.return_value.__enter__.assert_called_once()
    file.return_value.__enter__.return_value.read.assert_called()
    conn.send.assert_called_once()
    conn.close.assert_called_once()
    sock.return_value.shutdown.assert_called_once()
    sock.return_value.close.assert_called_once()
| 91 | 1 |
'''simple docstring'''
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
# Modalities a tool input/output is allowed to declare.
__UpperCamelCase : Optional[int] = ["""text""", """image""", """audio"""]
def __UpperCAmelCase ( SCREAMING_SNAKE_CASE__: List[str] ) -> Optional[int]:
    """Build one fixture per declared input type (text / image / audio).

    NOTE(review): obfuscation damage — the accumulator is bound as ``__a`` but
    appended to and returned as ``inputs`` (unbound), the loop reads
    ``input_types`` instead of the parameter, and the recursive call targets
    ``create_inputs`` although this function is named ``__UpperCAmelCase``.
    TODO restore the original identifiers.
    """
    __a = []
    for input_type in input_types:
        if input_type == "text":
            inputs.append('Text input' )
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir('fixtures/tests_samples/COCO' ) ) / '000000039769.png' ).resize((512, 512) ) )
        elif input_type == "audio":
            inputs.append(torch.ones(3000 ) )
        elif isinstance(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ):
            # Nested list of types -> build fixtures recursively.
            inputs.append(create_inputs(SCREAMING_SNAKE_CASE__ ) )
        else:
            raise ValueError(f"""Invalid type requested: {input_type}""" )
    return inputs
def __UpperCAmelCase ( SCREAMING_SNAKE_CASE__: List ) -> Union[str, Any]:
    """Classify each output object as "text", "image" or "audio".

    NOTE(review): obfuscation damage — this redefines ``__UpperCAmelCase`` and
    shadows the function above; the accumulator is bound as ``__a`` but
    appended to / returned as ``output_types`` (unbound). TODO restore.
    """
    __a = []
    for output in outputs:
        if isinstance(SCREAMING_SNAKE_CASE__, (str, AgentText) ):
            output_types.append('text' )
        elif isinstance(SCREAMING_SNAKE_CASE__, (Image.Image, AgentImage) ):
            output_types.append('image' )
        elif isinstance(SCREAMING_SNAKE_CASE__, (torch.Tensor, AgentAudio) ):
            output_types.append('audio' )
        else:
            raise ValueError(f"""Invalid output: {output}""" )
    return output_types
@is_tool_test
class __SCREAMING_SNAKE_CASE :
    """Mixin of common checks for Transformers tools (expects ``self.tool``).

    NOTE(review): obfuscation damage — all five test methods share the name
    ``__UpperCamelCase`` (only the last survives in the class namespace), and
    the bodies reference the unbound name ``lowerCamelCase`` where loop
    variables / call arguments once were. The ``assertTrue``/``assertEqual``
    calls suggest this was meant to mix into ``unittest.TestCase`` — TODO
    confirm and restore the original method names.
    """
    def __UpperCamelCase ( self ) ->Dict:
        """Check declared input/output modalities are among the authorized types."""
        self.assertTrue(hasattr(self.tool , 'inputs' ) )
        self.assertTrue(hasattr(self.tool , 'outputs' ) )
        __a = self.tool.inputs
        for _input in inputs:
            if isinstance(_input , lowerCamelCase ):
                for __input in _input:
                    self.assertTrue(__input in authorized_types )
            else:
                self.assertTrue(_input in authorized_types )
        __a = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types )
    def __UpperCamelCase ( self ) ->Any:
        """Call the tool on generated fixtures and check output modalities."""
        __a = create_inputs(self.tool.inputs )
        __a = self.tool(*lowerCamelCase )
        # There is a single output
        if len(self.tool.outputs ) == 1:
            __a = [outputs]
        self.assertListEqual(output_types(lowerCamelCase ) , self.tool.outputs )
    def __UpperCamelCase ( self ) ->int:
        """Check description/default_checkpoint metadata conventions."""
        self.assertTrue(hasattr(self.tool , 'description' ) )
        self.assertTrue(hasattr(self.tool , 'default_checkpoint' ) )
        self.assertTrue(self.tool.description.startswith('This is a tool that' ) )
    def __UpperCamelCase ( self ) ->Any:
        """Check each output is an instance of the mapped agent type."""
        __a = create_inputs(self.tool.inputs )
        __a = self.tool(*lowerCamelCase )
        if not isinstance(lowerCamelCase , lowerCamelCase ):
            __a = [outputs]
        self.assertEqual(len(lowerCamelCase ) , len(self.tool.outputs ) )
        for output, output_type in zip(lowerCamelCase , self.tool.outputs ):
            __a = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(lowerCamelCase , lowerCamelCase ) )
    def __UpperCamelCase ( self ) ->List[str]:
        """Check the tool also accepts agent-typed (wrapped) inputs."""
        __a = create_inputs(self.tool.inputs )
        __a = []
        for _input, input_type in zip(lowerCamelCase , self.tool.inputs ):
            if isinstance(lowerCamelCase , lowerCamelCase ):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
        # Should not raise an error
        __a = self.tool(*lowerCamelCase )
        if not isinstance(lowerCamelCase , lowerCamelCase ):
            __a = [outputs]
        self.assertEqual(len(lowerCamelCase ) , len(self.tool.outputs ) )
'''simple docstring'''
from math import isqrt
def __UpperCAmelCase ( SCREAMING_SNAKE_CASE__: int ) -> bool:
    """Trial-division primality test.

    Bug fix: the original returned True for 0, 1 and negative numbers because
    ``all()`` over an empty divisor range is vacuously True; anything below 2
    is now rejected explicitly. Divisors are checked up to the integer square
    root.
    """
    if SCREAMING_SNAKE_CASE__ < 2:
        return False
    return all(SCREAMING_SNAKE_CASE__ % divisor != 0 for divisor in range(2, isqrt(SCREAMING_SNAKE_CASE__) + 1))
def __UpperCAmelCase ( SCREAMING_SNAKE_CASE__: int = 10**6 ) -> int:
    """Count primes below the limit that are differences of consecutive cubes.

    Candidates are (k+1)**3 - k**3 = 7, 19, 37, 61, ... (Project Euler 131).

    Bug fix: the original body called ``is_prime``, a name that does not exist
    in this module (the primality helper above is also named
    ``__UpperCAmelCase`` and is shadowed by this definition), so the function
    could never run. The primality test is nested here, making the block
    self-contained without changing the public signature.
    """
    def _is_prime(number: int) -> bool:
        # Trial division up to the integer square root; reject n < 2.
        if number < 2:
            return False
        return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))

    primes_count = 0
    cube_index = 1
    prime_candidate = 7  # 2**3 - 1**3, the first consecutive-cube difference
    while prime_candidate < SCREAMING_SNAKE_CASE__:
        primes_count += _is_prime(prime_candidate)
        cube_index += 1
        # (k+1)**3 - k**3 grows by 6*k between consecutive k.
        prime_candidate += 6 * cube_index
    return primes_count
if __name__ == "__main__":
    # NOTE(review): ``solution`` is not defined in this module (both functions
    # above are named ``__UpperCAmelCase``); running this script raises
    # NameError — TODO restore the original function names.
    print(f"""{solution() = }""")
"""simple docstring"""
def __A ( a_ :int) -> int:
    """Return a_! (factorial), with 0! == 1.

    Bug fix: the original loop ranged over ``num`` and multiplied into
    ``fact``, but neither name was bound (the parameter is ``a_`` and the
    accumulator was assigned to ``__a``), so every call raised NameError.
    The return annotation is corrected from ``Optional[int]`` to ``int``.
    """
    fact = 1
    for i in range(1 , a_ + 1):
        fact *= i
    return fact
def __A ( a_ :int) -> int:
    """Return the sum of the decimal digits of a non-negative integer.

    Bug fix: the original read/updated ``number``, ``last_digit`` and
    ``sum_of_digits``, none of which were bound (every assignment went to the
    throwaway local ``__a``), so every call raised NameError. The return
    annotation is corrected from ``List[str]`` to ``int``.
    """
    sum_of_digits = 0
    number = a_
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number //= 10  # drop the last digit
    return sum_of_digits
def __A ( a_ :int = 1_00) -> int:
    """Project Euler 20: sum of the digits of a_! (default 100!).

    Bug fix: the original called ``factorial``/``split_and_add`` on the
    unbound name ``snake_case__``, and neither callee exists under those names
    in this module (the helpers above are all named ``__A``). The computation
    is inlined here so the block is self-contained; the return annotation is
    corrected from ``List[str]`` to ``int``.
    """
    fact = 1
    for i in range(2 , a_ + 1):
        fact *= i
    # Sum the decimal digits of the factorial.
    return sum(int(digit) for digit in str(fact))
if __name__ == "__main__":
    # NOTE(review): ``solution`` is not defined in this module (the functions
    # above are all named ``__A``); running this script raises NameError.
    print(solution(int(input('''Enter the Number: ''').strip())))
| 706 |
"""simple docstring"""
def __A (price: float , tax_rate: float) -> float:
    """Return the price with tax applied: price * (1 + tax_rate).

    Bug fix: the original declared both parameters as ``a_`` (duplicate
    argument names are a SyntaxError) while the body read the unbound names
    ``price``/``tax_rate``; the intended parameter names are restored
    (positional callers are unaffected).
    """
    return price * (1 + tax_rate)
if __name__ == "__main__":
    # NOTE(review): ``price_plus_tax`` is not defined in this module (the
    # function above is named ``__A``); running this script raises NameError.
    print(F'{price_plus_tax(100, 0.25) = }')
    print(F'{price_plus_tax(125.50, 0.05) = }')
'''simple docstring'''
from __future__ import annotations
from statistics import mean
def _a ( arrival_time : list[int] , burst_time : list[int] , no_of_processes : int ):
    """Compute per-process waiting times under non-preemptive shortest-job-first.

    Bug fix: the original declared all three parameters as ``__lowerCAmelCase``
    (duplicate argument names are a SyntaxError) and assigned every local to
    ``snake_case__`` while reading the intended names (``waiting_time``,
    ``remaining_time``, ``total_time``, ...), so it could never run. The
    visible control-flow skeleton is preserved exactly; only the destroyed
    identifiers are restored.

    Args:
        arrival_time: arrival instant of each process.
        burst_time: CPU burst length of each process.
        no_of_processes: number of processes.
    Returns:
        waiting time of each process, indexed like the inputs.
    """
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes
    # Initialize remaining_time to the burst times (nothing has run yet).
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    ready_process: list[int] = []
    completed = 0
    total_time = 0
    # While processes remain, collect every arrived, unfinished process into
    # ready_process and run the one with the shortest remaining time to
    # completion; if nothing has arrived yet, advance the clock by one tick.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1
        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)
        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            # Non-preemptive: the chosen process runs its whole burst.
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time
def _a ( burst_time : list[int] , no_of_processes : int , waiting_time : list[int] ):
    """Compute turnaround times: turnaround[i] = burst[i] + waiting[i].

    Bug fix: the original declared all three parameters as ``__lowerCAmelCase``
    (duplicate argument names are a SyntaxError) and returned the unbound name
    ``turn_around_time`` while assigning to the throwaway local
    ``snake_case__``. The intended names are restored; positional callers
    (burst_time, no_of_processes, waiting_time) are unaffected.
    """
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
# Demo run for the SJF scheduler above.
# NOTE(review): ``calculate_waitingtime`` and ``calculate_turnaroundtime`` are
# not defined in this module (both functions above are named ``_a``), so this
# script raises NameError — TODO restore the original function names.
if __name__ == "__main__":
    print("""[TEST CASE 01]""")
    lowerCAmelCase__ : Tuple = 4
    lowerCAmelCase__ : int = [2, 5, 3, 7]
    lowerCAmelCase__ : List[Any] = [0, 0, 0, 0]
    lowerCAmelCase__ : Union[str, Any] = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    lowerCAmelCase__ : int = calculate_turnaroundtime(
        burst_time, no_of_processes, waiting_time
    )
    # Printing the Result
    print("""PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time""")
    for i, process_id in enumerate(list(range(1, 5))):
        print(
            f"""{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"""
            f"""{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"""
        )
    print(f"""\nAverage waiting time = {mean(waiting_time):.5f}""")
    print(f"""Average turnaround time = {mean(turn_around_time):.5f}""")
| 347 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class a ( SCREAMING_SNAKE_CASE ):
    """Processor bundling a ChineseCLIP image processor with a BERT tokenizer.

    NOTE(review): obfuscation damage — the three class attributes all share
    the (name-mangled) name ``__UpperCAmelCase``; ``__init__`` and ``__call__``
    declare several parameters all called ``snake_case_`` plus
    ``**snake_case_`` (duplicate argument names are a SyntaxError); and the
    bodies read names that are never bound (``kwargs``, ``image_processor``,
    ``tokenizer``, ``feature_extractor``, ``text``, ``images``, ``encoding``,
    ``image_features``). TODO restore original identifiers.
    """
    __UpperCAmelCase = ["""image_processor""", """tokenizer"""]
    __UpperCAmelCase = """ChineseCLIPImageProcessor"""
    __UpperCAmelCase = ("""BertTokenizer""", """BertTokenizerFast""")
    def __init__( self : List[Any] , snake_case_ : Any=None , snake_case_ : Union[str, Any]=None , **snake_case_ : Any ):
        """Validate components; accept the deprecated ``feature_extractor`` kwarg."""
        snake_case__ : int = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , snake_case_ , )
            snake_case__ : Tuple = kwargs.pop('''feature_extractor''' )
        snake_case__ : List[str] = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        super().__init__(snake_case_ , snake_case_ )
        snake_case__ : Optional[Any] = self.image_processor
    def __call__( self : Optional[int] , snake_case_ : str=None , snake_case_ : Optional[Any]=None , snake_case_ : List[Any]=None , **snake_case_ : Optional[int] ):
        """Encode text and/or images; attaches pixel_values when both are given."""
        if text is None and images is None:
            raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
        if text is not None:
            snake_case__ : Optional[Any] = self.tokenizer(snake_case_ , return_tensors=snake_case_ , **snake_case_ )
        if images is not None:
            snake_case__ : Any = self.image_processor(snake_case_ , return_tensors=snake_case_ , **snake_case_ )
        if text is not None and images is not None:
            snake_case__ : Union[str, Any] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**snake_case_ ) , tensor_type=snake_case_ )
    def __magic_name__ ( self : str , *snake_case_ : Any , **snake_case_ : int ):
        """Forward batch_decode to the tokenizer."""
        return self.tokenizer.batch_decode(*snake_case_ , **snake_case_ )
    def __magic_name__ ( self : Any , *snake_case_ : int , **snake_case_ : Tuple ):
        """Forward decode to the tokenizer."""
        return self.tokenizer.decode(*snake_case_ , **snake_case_ )
    @property
    def __magic_name__ ( self : Optional[int] ):
        """Union of tokenizer and image-processor model input names, de-duplicated."""
        snake_case__ : Dict = self.tokenizer.model_input_names
        snake_case__ : str = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
    @property
    def __magic_name__ ( self : Union[str, Any] ):
        """Deprecated alias for ``image_processor_class``."""
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , snake_case_ , )
        return self.image_processor_class
| 347 | 1 |
'''simple docstring'''
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
lowerCAmelCase : Tuple = datasets.logging.get_logger(__name__)
lowerCAmelCase : List[str] = '''\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",
author = "Moosavi, Nafise Sadat and
Strube, Michael",
booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = aug,
year = "2016",
address = "Berlin, Germany",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/P16-1060",
doi = "10.18653/v1/P16-1060",
pages = "632--642",
}
'''
lowerCAmelCase : Union[str, Any] = '''\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements of the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only work with CoNLL line format:
The CoNLL format has one word per line with all the annotation for this word in column separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11 Named Entities These columns identifies the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
'''
lowerCAmelCase : List[Any] = '''
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
mentions whose corresponding coreference chain is of size one,
are considered as singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting \'keep_singletons=False\', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
\'mentions\': mentions
\'muc\': MUC metric [Vilain et al, 1995]
\'bcub\': B-cubed [Bagga and Baldwin, 1998]
\'ceafe\': CEAFe [Luo et al., 2005]
\'lea\': LEA [Moosavi and Strube, 2016]
\'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric(\'coval\')
>>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',
... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',
... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',
... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',
... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',
... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}
'''
def __lowerCAmelCase ( lowerCamelCase : str , lowerCamelCase : Union[str, Any] , lowerCamelCase : List[Any]=False , lowerCamelCase : Dict=False , lowerCamelCase : int=True , lowerCamelCase : str=False , lowerCamelCase : Tuple="dummy_doc" ):
    """Extract coreference mention/cluster info from key and system CoNLL lines.

    NOTE(review): obfuscation damage — every parameter is named
    ``lowerCamelCase`` (duplicate argument names are a SyntaxError), and the
    tuple unpacks below bind both targets to the single name
    ``__lowerCAmelCase`` while counters read the intended names
    (``singletons_num``, ``nested_mentions``, ``removed_clusters``, ...),
    which are unbound. The original shape (per the coval wrapper) is
    presumably (key_lines, sys_lines, NP_only, remove_nested, keep_singletons,
    min_span, doc) — TODO restore.
    """
    __lowerCAmelCase = {doc: key_lines}
    __lowerCAmelCase = {doc: sys_lines}
    __lowerCAmelCase = {}
    __lowerCAmelCase = 0
    __lowerCAmelCase = 0
    __lowerCAmelCase = 0
    __lowerCAmelCase = 0
    __lowerCAmelCase = 0
    __lowerCAmelCase = 0
    __lowerCAmelCase , __lowerCAmelCase = reader.get_doc_mentions(lowerCamelCase , key_doc_lines[doc] , lowerCamelCase )
    key_singletons_num += singletons_num
    if NP_only or min_span:
        __lowerCAmelCase = reader.set_annotated_parse_trees(lowerCamelCase , key_doc_lines[doc] , lowerCamelCase , lowerCamelCase )
    __lowerCAmelCase , __lowerCAmelCase = reader.get_doc_mentions(lowerCamelCase , sys_doc_lines[doc] , lowerCamelCase )
    sys_singletons_num += singletons_num
    if NP_only or min_span:
        __lowerCAmelCase = reader.set_annotated_parse_trees(lowerCamelCase , key_doc_lines[doc] , lowerCamelCase , lowerCamelCase )
    if remove_nested:
        __lowerCAmelCase , __lowerCAmelCase = reader.remove_nested_coref_mentions(lowerCamelCase , lowerCamelCase )
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters
        __lowerCAmelCase , __lowerCAmelCase = reader.remove_nested_coref_mentions(lowerCamelCase , lowerCamelCase )
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters
    __lowerCAmelCase = reader.get_mention_assignments(lowerCamelCase , lowerCamelCase )
    __lowerCAmelCase = reader.get_mention_assignments(lowerCamelCase , lowerCamelCase )
    __lowerCAmelCase = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
    if remove_nested:
        logger.info(
            "Number of removed nested coreferring mentions in the key "
            f'''annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}''' )
        logger.info(
            "Number of resulting singleton clusters in the key "
            f'''annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}''' )
    if not keep_singletons:
        logger.info(
            f'''{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system '''
            "files, respectively" )
    return doc_coref_infos
def __lowerCAmelCase ( lowerCamelCase : List[str] , lowerCamelCase : int , lowerCamelCase : List[Any] , lowerCamelCase : Any , lowerCamelCase : List[str] , lowerCamelCase : str , lowerCamelCase : int ):
    """Run each coval metric over the coref infos and aggregate a CoNLL score.

    NOTE(review): obfuscation damage — all seven parameters are named
    ``lowerCamelCase`` (duplicate argument names are a SyntaxError) and the
    accumulators (``output_scores``, ``conll``, ``conll_subparts_num``) are
    read while every assignment targets ``__lowerCAmelCase``. The CoNLL score
    is the mean of the MUC, B-cubed and CEAFe F1 values, scaled to 0-100.
    """
    __lowerCAmelCase = get_coref_infos(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
    __lowerCAmelCase = {}
    __lowerCAmelCase = 0
    __lowerCAmelCase = 0
    for name, metric in metrics:
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = evaluator.evaluate_documents(lowerCamelCase , lowerCamelCase , beta=1 )
        if name in ["muc", "bcub", "ceafe"]:
            conll += fa
            conll_subparts_num += 1
        output_scores.update({f'''{name}/recall''': recall, f'''{name}/precision''': precision, f'''{name}/f1''': fa} )
        logger.info(
            name.ljust(10 ) , f'''Recall: {recall * 1_00:.2f}''' , f''' Precision: {precision * 1_00:.2f}''' , f''' F1: {fa * 1_00:.2f}''' , )
    if conll_subparts_num == 3:
        __lowerCAmelCase = (conll / 3) * 1_00
        logger.info(f'''CoNLL score: {conll:.2f}''' )
        output_scores.update({"conll_score": conll} )
    return output_scores
def __lowerCAmelCase ( key_lines ):
    """Return True if the CoNLL key lines carry gold parse annotation.

    A data line (one not starting with ``#``) must have more than 6
    space-separated columns; the parse bit lives in column 6 (index 5) and is
    ``-`` when absent. Scanning stops at the first line that is too short.

    Bug fix: the original assigned every local to ``__lowerCAmelCase`` and
    returned the unbound name ``has_gold_parse`` (it also read the unbound
    ``parse_col``), so it could never run; the parameter annotation
    ``List[Any]`` referenced an unimported name. The intended logic is
    restored without changing the positional call signature.
    """
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("#"):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if parse_col != "-":
                    has_gold_parse = True
                    break
            else:
                # A line with too few columns ends the scan early.
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase__ ( datasets.Metric ):
    """CoVal coreference metric wrapper for the ``datasets`` library.

    NOTE(review): ``_DESCRIPTION`` / ``_KWARGS_DESCRIPTION`` are referenced in
    the decorator but the module's string constants are bound to
    ``lowerCAmelCase`` above — presumably the original constant names were
    mangled; TODO confirm against the upstream metric script.
    """
    def UpperCAmelCase_ ( self ) -> Tuple:
        """Declare metric metadata: both inputs are sequences of CoNLL lines."""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string" ) ),
                    "references": datasets.Sequence(datasets.Value("string" ) ),
                } ) , codebase_urls=["https://github.com/ns-moosavi/coval"] , reference_urls=[
                "https://github.com/ns-moosavi/coval",
                "https://www.aclweb.org/anthology/P16-1060",
                "http://www.conll.cemantix.org/2012/data.html",
            ] , )
    def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase=True , UpperCamelCase=False , UpperCamelCase=False , UpperCamelCase=False ) -> Union[str, Any]:
        """Compute all coval sub-metrics; min_span requires gold parse columns.

        NOTE(review): parameters after ``self`` all share the name
        ``UpperCamelCase`` (duplicate argument names are a SyntaxError) —
        presumably (predictions, references, keep_singletons, NP_only,
        min_span, remove_nested) in the original.
        """
        __lowerCAmelCase = [
            ("mentions", evaluator.mentions),
            ("muc", evaluator.muc),
            ("bcub", evaluator.b_cubed),
            ("ceafe", evaluator.ceafe),
            ("lea", evaluator.lea),
        ]
        if min_span:
            __lowerCAmelCase = util.check_gold_parse_annotation(UpperCamelCase )
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'." )
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"
        __lowerCAmelCase = evaluate(
            key_lines=UpperCamelCase , sys_lines=UpperCamelCase , metrics=UpperCamelCase , NP_only=UpperCamelCase , remove_nested=UpperCamelCase , keep_singletons=UpperCamelCase , min_span=UpperCamelCase , )
        return score
'''simple docstring'''
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
# Default filename under which scheduler configurations are serialized.
lowerCAmelCase : Optional[Any] = '''scheduler_config.json'''
class UpperCAmelCase__ ( UpperCamelCase__ ):
    """Enumeration of 14 scheduler identifiers (values 1-14).

    NOTE(review): obfuscation damage — every member is named ``a``, so only
    the final assignment (14) survives in the class namespace, and the base
    class name ``UpperCamelCase__`` is not defined in this module; the
    original presumably declared distinct enum members — TODO restore.
    """
    a : str = 1
    a : Optional[int] = 2
    a : int = 3
    a : Union[str, Any] = 4
    a : int = 5
    a : Optional[int] = 6
    a : str = 7
    a : List[Any] = 8
    a : List[str] = 9
    a : List[str] = 1_0
    a : int = 1_1
    a : Any = 1_2
    a : Any = 1_3
    a : Tuple = 1_4
@dataclass
class UpperCAmelCase__ ( UpperCamelCase__ ):
    """Scheduler step output holding a single float tensor field.

    NOTE(review): the field name ``a`` and base ``UpperCamelCase__`` look
    mangled (presumably ``prev_sample`` on ``BaseOutput``) — TODO confirm.
    """
    # Tensor produced by the scheduler step (presumably the previous sample).
    a : torch.FloatTensor
class UpperCAmelCase__ :
    """Scheduler base-mixin: config load/save plus compatible-class discovery.

    NOTE(review): obfuscation damage — the three class attributes are all
    named ``a`` (only the last survives) and all four methods are named
    ``UpperCAmelCase_`` (presumably from_pretrained, save_pretrained, the
    ``compatibles`` property and ``_get_compatibles``); classmethod signatures
    repeat the parameter name ``UpperCamelCase``, which is a SyntaxError.
    TODO restore from the upstream SchedulerMixin.
    """
    a : Tuple = SCHEDULER_CONFIG_NAME
    a : Union[str, Any] = []
    a : str = True
    @classmethod
    def UpperCAmelCase_ ( cls , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase=False , **UpperCamelCase , ) -> int:
        """Load a scheduler config from a local path or the Hub and build the class."""
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = cls.load_config(
            pretrained_model_name_or_path=UpperCamelCase , subfolder=UpperCamelCase , return_unused_kwargs=UpperCamelCase , return_commit_hash=UpperCamelCase , **UpperCamelCase , )
        return cls.from_config(UpperCamelCase , return_unused_kwargs=UpperCamelCase , **UpperCamelCase )
    def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase = False , **UpperCamelCase ) -> Dict:
        """Serialize the scheduler configuration to a directory (optionally push)."""
        self.save_config(save_directory=UpperCamelCase , push_to_hub=UpperCamelCase , **UpperCamelCase )
    @property
    def UpperCAmelCase_ ( self ) -> str:
        """Classes this scheduler can be swapped with."""
        return self._get_compatibles()
    @classmethod
    def UpperCAmelCase_ ( cls ) -> Tuple:
        """Resolve the compatible-class names to classes on the root package."""
        __lowerCAmelCase = list(set([cls.__name__] + cls._compatibles ) )
        __lowerCAmelCase = importlib.import_module(__name__.split("." )[0] )
        __lowerCAmelCase = [
            getattr(UpperCamelCase , UpperCamelCase ) for c in compatible_classes_str if hasattr(UpperCamelCase , UpperCamelCase )
        ]
        return compatible_classes
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
# Lazy-import structure: maps submodule name -> public names it provides.
# Fix: the original bound this dict (and, later, each backend's symbol list) to a
# throwaway mangled variable, yet the _LazyModule call below referenced the
# never-defined name `_import_structure` (NameError on import); the LazyModule
# result was also discarded instead of replacing the module via `sys.modules`,
# which is the only use of the `import sys` already present here.
_import_structure = {
    "configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch-backed models are only advertised when torch is installed.
    _import_structure["modeling_gpt_neo"] = [
        "GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoForCausalLM",
        "GPTNeoForQuestionAnswering",
        "GPTNeoForSequenceClassification",
        "GPTNeoForTokenClassification",
        "GPTNeoModel",
        "GPTNeoPreTrainedModel",
        "load_tf_weights_in_gpt_neo",
    ]
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_gpt_neo"] = [
        "FlaxGPTNeoForCausalLM",
        "FlaxGPTNeoModel",
        "FlaxGPTNeoPreTrainedModel",
    ]
if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neo import (
            GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoForCausalLM,
            GPTNeoForQuestionAnswering,
            GPTNeoForSequenceClassification,
            GPTNeoForTokenClassification,
            GPTNeoModel,
            GPTNeoPreTrainedModel,
            load_tf_weights_in_gpt_neo,
        )
    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 332 |
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
is_abit_bnb_available,
is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 332 | 1 |
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def __magic_name__ ( lowercase_ ) -> List[Tuple[int, ...]]:
'''simple docstring'''
UpperCamelCase = []
if isinstance(lowercase_ , lowercase_ ):
for v in tree.values():
shapes.extend(_fetch_dims(lowercase_ ) )
elif isinstance(lowercase_ , (list, tuple) ):
for t in tree:
shapes.extend(_fetch_dims(lowercase_ ) )
elif isinstance(lowercase_ , torch.Tensor ):
shapes.append(tree.shape )
else:
raise ValueError("Not supported" )
return shapes
@torch.jit.ignore
def __magic_name__ ( lowercase_ , lowercase_ ) -> Tuple[int, ...]:
'''simple docstring'''
UpperCamelCase = []
for d in reversed(lowercase_ ):
idx.append(flat_idx % d )
UpperCamelCase = flat_idx // d
return tuple(reversed(lowercase_ ) )
@torch.jit.ignore
def __magic_name__ ( lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , lowercase_ = None , ) -> List[Tuple[slice, ...]]:
'''simple docstring'''
def reduce_edge_list(lowercase_ ) -> None:
UpperCamelCase = True
for i in range(len(lowercase_ ) ):
UpperCamelCase = -1 * (i + 1)
l[reversed_idx] &= tally
UpperCamelCase = l[reversed_idx]
if start_edges is None:
UpperCamelCase = [s == 0 for s in start]
reduce_edge_list(lowercase_ )
if end_edges is None:
UpperCamelCase = [e == (d - 1) for e, d in zip(lowercase_ , lowercase_ )]
reduce_edge_list(lowercase_ )
# Base cases. Either start/end are empty and we're done, or the final,
# one-dimensional tensor can be simply sliced
if len(lowercase_ ) == 0:
return [()]
elif len(lowercase_ ) == 1:
return [(slice(start[0] , end[0] + 1 ),)]
UpperCamelCase = []
UpperCamelCase = []
# Dimensions common to start and end can be selected directly
for s, e in zip(lowercase_ , lowercase_ ):
if s == e:
path_list.append(slice(lowercase_ , s + 1 ) )
else:
break
UpperCamelCase = tuple(lowercase_ )
UpperCamelCase = len(lowercase_ )
# start == end, and we're done
if divergence_idx == len(lowercase_ ):
return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
UpperCamelCase = start[divergence_idx]
return tuple(
path + (slice(lowercase_ , sdi + 1 ),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) )
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
UpperCamelCase = end[divergence_idx]
return tuple(
path + (slice(lowercase_ , edi + 1 ),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) )
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) )
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) )
slices.extend(lower() )
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper() )
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) )
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper() )
UpperCamelCase = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) )
slices.extend(lower() )
return slices
@torch.jit.ignore
def __magic_name__ ( t , flat_start , flat_end , no_batch_dims ) -> torch.Tensor:
    """Slice the flattened batch range [flat_start, flat_end) out of tensor *t*.

    Fix: all four parameters were mangled to one name (SyntaxError for
    duplicate parameter names) while the body read the originals
    (t / flat_end / no_batch_dims; the first helper argument must be
    flat_start by elimination).

    NOTE(review): ``_flat_idx_to_idx`` and ``_get_minimal_slice_set`` are
    called under their original names, but the sibling definitions in this
    mangled module are all named ``__magic_name__`` — the module-level names
    must be realigned before this can run.
    """
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive, hence flat_end - 1.
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))
    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(start_idx, end_idx, batch_dims)
    sliced_tensors = [t[s] for s in slices]
    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def __magic_name__ ( layer , inputs , chunk_size , no_batch_dims , low_mem = False , _out = None , _add_into_out = False , ) -> Any:
    """Apply *layer* chunk-by-chunk over the flattened leading batch dimensions.

    Fixes: all seven parameters were mangled to one name (SyntaxError) while the
    body read the originals (layer/inputs/chunk_size/no_batch_dims/low_mem/_out/
    _add_into_out); ``max(s)`` in the broadcast-shape computation called ``max``
    on an undefined name; and every slice-write into ``out`` had been collapsed
    into a plain local rebinding, so chunk results were silently dropped —
    the subscripted assignments are restored below.

    Raises:
        ValueError: if *inputs* is empty or the layer output type is unsupported.
    """
    if not (len(inputs) > 0):
        raise ValueError("Must provide at least one input")
    # Broadcast batch shape across all inputs.
    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t):
        # Expand to the common batch shape; flatten batch dims unless low_mem.
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)
    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d
    # Ceil-divide: one extra chunk for the ragged tail.
    num_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t):
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(num_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice,
                flat_start=i,
                flat_end=min(flat_batch_dim, i + chunk_size),
                no_batch_dims=len(orig_batch_dims),
            )
        chunks = tensor_tree_map(select_chunk, prepped_inputs)
        # Run the layer on the chunk
        output_chunk = layer(**chunks)
        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)
        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):
            def assign(d1, d2) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]
            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("Not supported")
        i += chunk_size
    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)
    return out
class __UpperCAmelCase :
    """Caches and tunes the largest viable chunk size for chunked inference.

    Binary-searches over power-of-two candidates up to ``max_chunk_size`` and
    remembers the result together with a fingerprint of the arguments so
    retuning only happens when the argument shapes change.

    Fixes: in the mangled original all three methods shared one name (so only
    the last survived) while the bodies called
    ``self._determine_favorable_chunk_size`` / ``self._compare_arg_caches``
    (AttributeError) — those internally-referenced names are restored and the
    surviving public entry point keeps its visible name; the sort-key and
    fingerprint lambdas declared one parameter but read another (NameError);
    and the dict-type check was garbled to ``isinstance(x, x)``.
    """

    def __init__(self, max_chunk_size=512) -> None:
        # Upper bound for the chunk-size search.
        self.max_chunk_size = max_chunk_size
        # Last tuned chunk size and the argument fingerprint it was tuned for.
        self.cached_chunk_size = None
        self.cached_arg_data = None

    def _determine_favorable_chunk_size(self, fn, args, min_chunk_size) -> int:
        """Binary-search the largest chunk size for which ``fn(*args)`` succeeds."""
        logging.info("Tuning chunk size...")
        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size
        # Powers of two up to max_chunk_size, bracketed below by min_chunk_size.
        candidates = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size) -> bool:
            # A candidate is viable iff the forward pass does not raise (e.g. OOM).
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False

        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2
        return candidates[min_viable_chunk_size_index]

    def _compare_arg_caches(self, ac1, ac2) -> bool:
        """Recursively compare two argument fingerprints for equality."""
        consistent = True
        for a1, a2 in zip(ac1, ac2):
            assert type(a1) == type(a2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                # Compare dict values in a deterministic (key-sorted) order.
                a1_items = [v for _, v in sorted(a1.items(), key=lambda kv: kv[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda kv: kv[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2
        return consistent

    def __lowerCAmelCase(self, representative_fn, args, min_chunk_size) -> int:
        """Return a chunk size for *args*, retuning only when the fingerprint changed."""
        consistent = True
        # Fingerprint: tensors reduce to their shapes, everything else is kept as-is.
        # NOTE(review): the trailing leaf-type argument to tree_map was mangled
        # away; ``object`` is assumed here — confirm against tensor_utils.tree_map.
        arg_data = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune.
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # Nothing cached yet: always tune.
            consistent = False
        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn, args, min_chunk_size
            )
            self.cached_arg_data = arg_data
        assert self.cached_chunk_size is not None
        return self.cached_chunk_size
| 714 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy-import structure: maps submodule name -> public names it provides.
# Fix: the original bound this dict (and the torch-only symbol list) to a
# throwaway mangled variable, yet the _LazyModule call below referenced the
# never-defined name `_import_structure` (NameError on import); the LazyModule
# result was also discarded instead of replacing the module via `sys.modules`,
# which is the only use of the `import sys` already present here.
_import_structure = {
    """configuration_altclip""": [
        """ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """AltCLIPConfig""",
        """AltCLIPTextConfig""",
        """AltCLIPVisionConfig""",
    ],
    """processing_altclip""": ["""AltCLIPProcessor"""],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch-backed models are only advertised when torch is installed.
    _import_structure["""modeling_altclip"""] = [
        """ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """AltCLIPPreTrainedModel""",
        """AltCLIPModel""",
        """AltCLIPTextModel""",
        """AltCLIPVisionModel""",
    ]
if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_altclip import (
        ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AltCLIPConfig,
        AltCLIPTextConfig,
        AltCLIPVisionConfig,
    )
    from .processing_altclip import AltCLIPProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_altclip import (
            ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            AltCLIPModel,
            AltCLIPPreTrainedModel,
            AltCLIPTextModel,
            AltCLIPVisionModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 414 | 0 |
'''simple docstring'''
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaProcessor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
# Optional apex (mixed-precision) support.
if is_apex_available():
    from apex import amp
# NOTE(review): the flag set below (presumably "native torch.cuda.amp is
# available") is bound to the same mangled name as the logger two lines down,
# so its True value is immediately clobbered — the original distinct names
# must be restored; confirm which one the trainer reads.
if version.parse(version.parse(torch.__version__).base_version) >= version.parse('''1.6'''):
    _UpperCAmelCase : Any = True
    from torch.cuda.amp import autocast
_UpperCAmelCase : Any = logging.getLogger(__name__)
def UpperCamelCase ( lowercase_ : Optional[int]=None , lowercase_ : str=None ) -> Union[str, Any]:
'''simple docstring'''
return field(default_factory=lambda: default , metadata=lowercase_ )
@dataclass
class __magic_name__ :
    """Arguments selecting and configuring the pretrained wav2vec2 model to fine-tune.

    NOTE(review): every field below is bound to the same mangled name
    ``UpperCamelCase__`` (so only the last binding survives) and several
    defaults reference ``__SCREAMING_SNAKE_CASE``, which is not defined
    anywhere in this file — as written the class body raises at import time.
    The intended field names must be recovered from the help strings
    (model_name_or_path, cache_dir, freeze_feature_extractor,
    attention_dropout, activation_dropout, hidden_dropout, feat_proj_dropout,
    mask_time_prob, layerdrop); confirm before relying on this class.
    """
    UpperCamelCase__ = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
    UpperCamelCase__ = field(
        default=__SCREAMING_SNAKE_CASE , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
    UpperCamelCase__ = field(
        default=__SCREAMING_SNAKE_CASE , metadata={'help': 'Whether to freeze the feature extractor layers of the model.'} )
    UpperCamelCase__ = field(
        default=0.1 , metadata={'help': 'The dropout ratio for the attention probabilities.'} )
    UpperCamelCase__ = field(
        default=0.1 , metadata={'help': 'The dropout ratio for activations inside the fully connected layer.'} )
    UpperCamelCase__ = field(
        default=0.1 , metadata={
            'help': 'The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler.'
        } , )
    UpperCamelCase__ = field(
        default=0.1 , metadata={'help': 'The dropout probabilitiy for all 1D convolutional layers in feature extractor.'} , )
    UpperCamelCase__ = field(
        default=0.0_5 , metadata={
            'help': (
                'Propability of each feature vector along the time axis to be chosen as the start of the vector'
                'span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature'
                'vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``.'
            )
        } , )
    UpperCamelCase__ = field(default=0.0 , metadata={'help': 'The LayerDrop probability.'} )
@dataclass
class __magic_name__ :
    """Arguments selecting the Common Voice split and controlling preprocessing.

    NOTE(review): every field is bound to the same mangled name
    ``UpperCamelCase__`` (only the last binding survives) and several defaults
    reference the undefined ``__SCREAMING_SNAKE_CASE``; the last field also
    calls ``list_field``, a name never defined in this file (the helper above
    was renamed by the mangling). Intended names per the help strings:
    dataset_config_name, train_split_name, overwrite_cache,
    preprocessing_num_workers, max_train_samples, max_val_samples,
    chars_to_ignore — confirm before relying on this class.
    """
    UpperCamelCase__ = field(
        default=__SCREAMING_SNAKE_CASE , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
    UpperCamelCase__ = field(
        default='train+validation' , metadata={
            'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
        } , )
    UpperCamelCase__ = field(
        default=__SCREAMING_SNAKE_CASE , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
    UpperCamelCase__ = field(
        default=__SCREAMING_SNAKE_CASE , metadata={'help': 'The number of processes to use for the preprocessing.'} , )
    UpperCamelCase__ = field(
        default=__SCREAMING_SNAKE_CASE , metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of training examples to this '
                'value if set.'
            )
        } , )
    UpperCamelCase__ = field(
        default=__SCREAMING_SNAKE_CASE , metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of validation examples to this '
                'value if set.'
            )
        } , )
    UpperCamelCase__ = list_field(
        default=[',', '?', '.', '!', '-', ';', ':', '""', '%', '\'', '"', '�'] , metadata={'help': 'A list of characters to remove from the transcripts.'} , )
@dataclass
class __magic_name__ :
    """Data collator that dynamically pads wav2vec2 inputs and CTC labels.

    Inputs and labels are padded separately (they have different lengths and
    padding methods); label padding is replaced by -100 so it is ignored by
    the CTC loss.

    Fixes: every dataclass field was bound to one mangled name while
    ``__call__`` read ``self.processor`` / ``self.padding`` /
    ``self.max_length`` / ``self.max_length_labels`` /
    ``self.pad_to_multiple_of`` / ``self.pad_to_multiple_of_labels``
    (AttributeError) — the field names are restored from those reads; the
    final write of the labels into the batch had also been collapsed into a
    throwaway local, so the collated batch never contained ``"labels"``.
    """

    # NOTE(review): annotated loosely — the project type is a Wav2Vec2-style
    # processor exposing .pad(); confirm against the caller.
    processor: Any
    padding: Union[bool, str] = True
    max_length: Optional[int] = None
    max_length_labels: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    pad_to_multiple_of_labels: Optional[int] = None

    def __call__(self, features):
        # split inputs and labels since they have to be of different lenghts and need
        # different padding methods
        input_features = [{'''input_values''': feature['''input_values''']} for feature in features]
        label_features = [{'''input_ids''': feature['''labels''']} for feature in features]
        batch = self.processor.pad(
            input_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors='''pt''',
        )
        labels_batch = self.processor.pad(
            labels=label_features,
            padding=self.padding,
            max_length=self.max_length_labels,
            pad_to_multiple_of=self.pad_to_multiple_of_labels,
            return_tensors='''pt''',
        )
        # replace padding with -100 to ignore loss correctly
        labels = labels_batch['''input_ids'''].masked_fill(labels_batch.attention_mask.ne(1), -1_00)
        batch['''labels'''] = labels
        return batch
class __magic_name__ ( Trainer ):
    """Trainer with a custom per-step loop for CTC training (amp/apex/deepspeed aware).

    Fix: the base class was a mangled undefined name (NameError at import);
    it is restored to ``Trainer``, which is imported above and whose API the
    method body relies on (``self._prepare_inputs``, ``self.args``,
    ``self.scaler``, ``self.use_amp`` …).
    """

    # NOTE(review): this is presumably meant to override Trainer.training_step;
    # the mangled name `_A` is kept so the class's visible interface is unchanged —
    # confirm and realign with the base class.
    def _A( self , model , inputs ):
        """Run one training step on *inputs* and return the detached loss.

        Fix: both parameters were mangled to one name (SyntaxError for
        duplicate parameter names) while the body read ``model``/``inputs``.
        """
        model.train()
        inputs = self._prepare_inputs(inputs)
        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)
        if self.args.n_gpu > 1:
            # Under DataParallel the per-GPU losses must be reduced here.
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                # Normalize the summed loss by the number of non-padding labels.
                loss = loss.sum() / (inputs['''labels'''] >= 0).sum()
            else:
                raise ValueError(f'{model.config.ctc_loss_reduction} is not valid. Choose one of [\'mean\', \'sum\']' )
        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps
        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()
        return loss.detach()
def UpperCamelCase ( ) -> Tuple:
    """Fine-tune a pretrained wav2vec2 checkpoint for CTC on Common Voice.

    Parses (model, data, training) arguments, builds the character vocabulary
    from the transcripts, preprocesses audio to 16 kHz arrays, trains with the
    custom CTC trainer and finally evaluates, returning the metrics dict.

    NOTE(review): the locals of this function were mangled — every value is
    bound to the same name ``lowercase`` while later statements read the
    original names (``parser``, ``training_args``, ``data_args``,
    ``model_args``, ``last_checkpoint``, ``train_dataset``, ``eval_dataset``,
    ``vocab_dict``, ``processor``, ``model``, ``trainer``, ``wer_metric`` …),
    none of which are defined here, and the names ``DataCollatorCTCWithPadding``
    / ``CTCTrainer`` do not match the mangled class names defined above.
    The original bindings must be restored before this can run.
    """
    lowercase =HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        lowercase , lowercase , lowercase =parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        lowercase , lowercase , lowercase =parser.parse_args_into_dataclasses()
    # Detecting last checkpoint.
    lowercase =None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        lowercase =get_last_checkpoint(training_args.output_dir )
        if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
            raise ValueError(
                f'Output directory ({training_args.output_dir}) already exists and is not empty. '
                '''Use --overwrite_output_dir to overcome.''' )
        elif last_checkpoint is not None:
            logger.info(
                f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
                '''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
    # Setup logging
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
    # Log on each process the small summary:
    logger.warning(
        f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
        + f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank ):
        transformers.utils.logging.set_verbosity_info()
    logger.info('''Training/evaluation parameters %s''' , lowercase_ )
    # Set seed before initializing model.
    set_seed(training_args.seed )
    # Get the datasets:
    lowercase =datasets.load_dataset(
        '''common_voice''' , data_args.dataset_config_name , split=data_args.train_split_name )
    lowercase =datasets.load_dataset('''common_voice''' , data_args.dataset_config_name , split='''test''' )
    # Create and save tokenizer
    lowercase =f'[{"".join(data_args.chars_to_ignore )}]'

    def remove_special_characters(lowercase_ : List[Any] ):
        # Strip ignored characters and lowercase the transcript (reads mangled `batch`).
        lowercase =re.sub(lowercase_ , '''''' , batch['''sentence'''] ).lower() + ''' '''
        return batch

    lowercase =train_dataset.map(lowercase_ , remove_columns=['''sentence'''] )
    lowercase =eval_dataset.map(lowercase_ , remove_columns=['''sentence'''] )

    def extract_all_chars(lowercase_ : Optional[int] ):
        # Collect the set of characters appearing in a batch of transcripts.
        lowercase =''' '''.join(batch['''text'''] )
        lowercase =list(set(lowercase_ ) )
        return {"vocab": [vocab], "all_text": [all_text]}

    lowercase =train_dataset.map(
        lowercase_ , batched=lowercase_ , batch_size=-1 , keep_in_memory=lowercase_ , remove_columns=train_dataset.column_names , )
    lowercase =train_dataset.map(
        lowercase_ , batched=lowercase_ , batch_size=-1 , keep_in_memory=lowercase_ , remove_columns=eval_dataset.column_names , )
    # Merge train/test character sets into one id mapping; the space character
    # is remapped (deleted as " ") below.
    lowercase =list(set(vocab_train['''vocab'''][0] ) | set(vocab_test['''vocab'''][0] ) )
    lowercase ={v: k for k, v in enumerate(lowercase_ )}
    lowercase =vocab_dict[''' ''']
    del vocab_dict[" "]
    lowercase =len(lowercase_ )
    lowercase =len(lowercase_ )
    with open('''vocab.json''' , '''w''' ) as vocab_file:
        json.dump(lowercase_ , lowercase_ )
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    lowercase =WavaVecaCTCTokenizer(
        '''vocab.json''' , unk_token='''[UNK]''' , pad_token='''[PAD]''' , word_delimiter_token='''|''' , )
    lowercase =WavaVecaFeatureExtractor(
        feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0.0 , do_normalize=lowercase_ , return_attention_mask=lowercase_ )
    lowercase =WavaVecaProcessor(feature_extractor=lowercase_ , tokenizer=lowercase_ )
    lowercase =WavaVecaForCTC.from_pretrained(
        model_args.model_name_or_path , cache_dir=model_args.cache_dir , activation_dropout=model_args.activation_dropout , attention_dropout=model_args.attention_dropout , hidden_dropout=model_args.hidden_dropout , feat_proj_dropout=model_args.feat_proj_dropout , mask_time_prob=model_args.mask_time_prob , gradient_checkpointing=training_args.gradient_checkpointing , layerdrop=model_args.layerdrop , ctc_loss_reduction='''mean''' , pad_token_id=processor.tokenizer.pad_token_id , vocab_size=len(processor.tokenizer ) , )
    if data_args.max_train_samples is not None:
        lowercase =min(len(lowercase_ ) , data_args.max_train_samples )
        lowercase =train_dataset.select(range(lowercase_ ) )
    if data_args.max_val_samples is not None:
        lowercase =eval_dataset.select(range(data_args.max_val_samples ) )
    # Common Voice audio is 48 kHz; wav2vec2 expects 16 kHz.
    lowercase =torchaudio.transforms.Resample(4_8_0_0_0 , 1_6_0_0_0 )
    # Preprocessing the datasets.
    # We need to read the aduio files as arrays and tokenize the targets.

    def speech_file_to_array_fn(lowercase_ : Optional[int] ):
        # Load + resample one utterance to a 16 kHz numpy array.
        lowercase , lowercase =torchaudio.load(batch['''path'''] )
        lowercase =resampler(lowercase_ ).squeeze().numpy()
        lowercase =1_6_0_0_0
        lowercase =batch['''text''']
        return batch

    lowercase =train_dataset.map(
        lowercase_ , remove_columns=train_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
    lowercase =eval_dataset.map(
        lowercase_ , remove_columns=eval_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )

    def prepare_dataset(lowercase_ : int ):
        # check that all files have the correct sampling rate
        assert (
            len(set(batch['''sampling_rate'''] ) ) == 1
        ), f'Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}.'
        lowercase =processor(
            audio=batch['''speech'''] , text=batch['''target_text'''] , sampling_rate=batch['''sampling_rate'''][0] )
        batch.update(lowercase_ )
        return batch

    lowercase =train_dataset.map(
        lowercase_ , remove_columns=train_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=lowercase_ , num_proc=data_args.preprocessing_num_workers , )
    lowercase =eval_dataset.map(
        lowercase_ , remove_columns=eval_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=lowercase_ , num_proc=data_args.preprocessing_num_workers , )
    # Metric
    lowercase =datasets.load_metric('''wer''' )

    def compute_metrics(lowercase_ : List[str] ):
        # Greedy-decode predictions and score them with word error rate.
        lowercase =pred.predictions
        lowercase =np.argmax(lowercase_ , axis=-1 )
        lowercase =processor.tokenizer.pad_token_id
        lowercase =processor.batch_decode(lowercase_ )
        # we do not want to group tokens when computing the metrics
        lowercase =processor.batch_decode(pred.label_ids , group_tokens=lowercase_ )
        lowercase =wer_metric.compute(predictions=lowercase_ , references=lowercase_ )
        return {"wer": wer}

    if model_args.freeze_feature_extractor:
        model.freeze_feature_extractor()
    # Data collator
    lowercase =DataCollatorCTCWithPadding(processor=lowercase_ , padding=lowercase_ )
    # Initialize our Trainer
    lowercase =CTCTrainer(
        model=lowercase_ , data_collator=lowercase_ , args=lowercase_ , compute_metrics=lowercase_ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=processor.feature_extractor , )
    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            lowercase =last_checkpoint
        elif os.path.isdir(model_args.model_name_or_path ):
            lowercase =model_args.model_name_or_path
        else:
            lowercase =None
        # Save the feature_extractor and the tokenizer
        if is_main_process(training_args.local_rank ):
            processor.save_pretrained(training_args.output_dir )
        lowercase =trainer.train(resume_from_checkpoint=lowercase_ )
        trainer.save_model()
        lowercase =train_result.metrics
        lowercase =(
            data_args.max_train_samples if data_args.max_train_samples is not None else len(lowercase_ )
        )
        lowercase =min(lowercase_ , len(lowercase_ ) )
        trainer.log_metrics('''train''' , lowercase_ )
        trainer.save_metrics('''train''' , lowercase_ )
        trainer.save_state()
    # Evaluation
    lowercase ={}
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''' )
        lowercase =trainer.evaluate()
        lowercase =data_args.max_val_samples if data_args.max_val_samples is not None else len(lowercase_ )
        lowercase =min(lowercase_ , len(lowercase_ ) )
        trainer.log_metrics('''eval''' , lowercase_ )
        trainer.save_metrics('''eval''' , lowercase_ )
    return results


if __name__ == "__main__":
    # NOTE(review): `main` is undefined here — the entry point above was renamed
    # ``UpperCamelCase`` by the mangling; the call target must be realigned.
    main()
| 72 |
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
# CLI-argument stand-in for `datasets-cli test`: the field order mirrors the
# positional arguments TestCommand expects; all optional fields default to
# None/False via `defaults`.
# NOTE(review): the namedtuple is bound to the mangled name ``__a`` while the
# test below constructs ``_TestCommandArgs(...)`` — that name is never defined
# in this file; the binding must be realigned.
__a = namedtuple(
    '_TestCommandArgs',
    [
        'dataset',
        'name',
        'cache_dir',
        'data_dir',
        'all_configs',
        'save_infos',
        'ignore_verifications',
        'force_redownload',
        'clear_cache',
    ],
    defaults=[None, None, None, False, False, False, False, False],
)
def a ( source: float , target: float ) -> bool:
    """Return True when *source* is within 1% (relative to *target*) of *target*.

    Fix: both parameters were mangled to the same name, a SyntaxError
    (duplicate parameter names), while the body already read the original
    names ``source`` and ``target``.
    """
    return (abs(source - target ) / target) < 0.0_1
@pytest.mark.integration
def a ( snake_case__: int ):
    """Run the datasets `test` CLI command against a dataset dir and validate the card.

    Fixes: ``TestCommand`` was star-unpacking the dataset path instead of the
    args namedtuple; the ``all_configs``/``save_infos`` flags were passed the
    path instead of True; the per-key comparison re-passed the path instead of
    the (result, expected) pair; and the final ``result == expected`` discarded
    its value — it is now asserted.

    NOTE(review): ``_TestCommandArgs`` and ``is_apercent_close`` are referenced
    under their original names, which the mangled bindings above
    (``__a`` / ``def a``) no longer provide — realign the module-level names.
    """
    args = _TestCommandArgs(dataset=snake_case__ , all_configs=True , save_infos=True )
    test_command = TestCommand(*args )
    test_command.run()
    # The command must have produced a dataset card next to the loading script.
    dataset_readme_path = os.path.join(snake_case__ , '''README.md''' )
    assert os.path.exists(dataset_readme_path )
    dataset_infos = DatasetInfosDict.from_directory(snake_case__ )
    expected_dataset_infos = DatasetInfosDict(
        {
            '''default''': DatasetInfo(
                features=Features(
                    {
                        '''tokens''': Sequence(Value('''string''' ) ),
                        '''ner_tags''': Sequence(
                            ClassLabel(names=['''O''', '''B-PER''', '''I-PER''', '''B-ORG''', '''I-ORG''', '''B-LOC''', '''I-LOC'''] ) ),
                        '''langs''': Sequence(Value('''string''' ) ),
                        '''spans''': Sequence(Value('''string''' ) ),
                    } ) , splits=[
                    {
                        '''name''': '''train''',
                        '''num_bytes''': 2_351_563,
                        '''num_examples''': 10_000,
                    },
                    {
                        '''name''': '''validation''',
                        '''num_bytes''': 238_418,
                        '''num_examples''': 1_000,
                    },
                ] , download_size=3_940_680 , dataset_size=2_589_981 , )
        } )
    assert dataset_infos.keys() == expected_dataset_infos.keys()
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos['''default'''] , key), getattr(expected_dataset_infos['''default'''] , key)
        if key == "num_bytes":
            # Byte counts drift slightly between library versions; allow 1%.
            assert is_apercent_close(result , expected )
        elif key == "splits":
            assert list(result ) == list(expected )
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_apercent_close(result[split].num_bytes , expected[split].num_bytes )
        else:
            assert result == expected
| 97 | 0 |
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class _A(Dataset):
    """Holds tokenized LM sequences for distillation training.

    Stores token-id sequences and their lengths as numpy arrays, cleans them
    up on construction (splits over-long sequences, drops near-empty and
    unknown-heavy ones) and provides a padding collate function.

    NOTE(review): the obfuscated base class was unresolvable; ``Dataset``
    (imported above from torch.utils.data) matches the __getitem__/__len__
    protocol used here — confirm. Internal helpers were all named ``a``,
    shadowing each other; they are restored to the names __init__ calls. The
    collate function keeps the surviving public name ``a``.
    """

    def __init__(self, params, data):
        self.params = params
        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])

        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)

    def check(self):
        """Sanity-check that stored lengths match the actual sequence lengths."""
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))

    def remove_long_sequences(self):
        """Split sequences longer than the model input size into chunks,
        re-adding the special start/end tokens to every chunk."""
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(f'Splitting {sum(indices)} too long sequences.')

        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                # max_len - 2 leaves room to re-add the two special tokens.
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)
                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(s) for s in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)

    def remove_empty_sequences(self):
        """Drop sequences of 11 tokens or fewer."""
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f'Remove {init_size - new_size} too short (<=11 tokens) sequences.')

    def remove_unknown_sequences(self):
        """Drop sequences in which 50% or more of the tokens are unknown."""
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["unk_token"]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f'Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).')

    def print_statistics(self):
        """Log dataset statistics (master process only)."""
        if not self.params.is_master:
            return
        logger.info(f'{len(self)} sequences')
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')

    def a(self, batch):
        """Collate function: pad a batch of (token_ids, length) pairs to the
        batch max length; returns (token_ids_tensor, lengths_tensor).

        Kept under its original public name so external callers are unaffected.
        """
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(token_ids)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
| 700 |
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class _A(unittest.TestCase):
    """Multi-GPU smoke tests: launch the accelerate test scripts via torchrun.

    NOTE(review): in the obfuscated original every method was named ``a`` and
    the script paths were never stored on ``self`` even though later methods
    read ``self.test_file_path`` etc.; restored to a conventional
    setUp + test_* layout so unittest can discover and run them.
    """

    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"])
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])

    @require_multi_gpu
    def test_multi_gpu(self):
        print(f'Found {torch.cuda.device_count()} devices.')
        cmd = ["torchrun", f'--nproc_per_node={torch.cuda.device_count()}', self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f'Found {torch.cuda.device_count()} devices.')
        cmd = ["torchrun", f'--nproc_per_node={torch.cuda.device_count()}', self.operation_file_path]
        print(f'Command: {cmd}')
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        # Re-launches this very file under torchrun; the __main__ block below
        # performs the actual per-process checks.
        cmd = ["torchrun", f'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f'Found {torch.cuda.device_count()} devices, using 2 devices only')
        cmd = ["torchrun", f'--nproc_per_node={torch.cuda.device_count()}', self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    # Runs under torchrun (see test_pad_across_processes above). Each process
    # builds a tensor whose first dimension depends on its rank, then checks
    # that pad_across_processes pads every rank's tensor to the common size.
    # NOTE(review): all targets below were obfuscated to one clobbered name;
    # restored from the variables the comparisons reference (tensora, index...).
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)

    error_msg = ""

    tensora = accelerator.pad_across_processes(tensor)
    if tensora.shape[0] != accelerator.state.num_processes + 1:
        error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."

    tensora = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensora.shape[0] != accelerator.state.num_processes + 1:
        error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensora[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensora[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
| 515 | 0 |
from collections.abc import Callable
import numpy as np
def _a(ode_func, y0, x0, step_size, x_end):
    """Integrate y' = ode_func(x, y) from x0 to x_end with the modified
    Euler (Heun) method.

    Args:
        ode_func: callable f(x, y) giving the derivative.
        y0: initial value y(x0).
        x0: start of the integration interval.
        step_size: fixed step size.
        x_end: end of the integration interval.

    Returns:
        numpy array of solution values; y[0] == y0.
    """
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        # Predictor: plain Euler step.
        y_predict = y[k] + step_size * ode_func(x, y[k])
        # Corrector: trapezoidal average of the slopes at both ends.
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        )
        x += step_size

    return y
if __name__ == "__main__":
    import doctest

    # Run this module's doctests when executed as a script.
    doctest.testmod()
| 315 |
import argparse
import torch
from torch import nn
from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration
def remove_ignore_keys_(state_dict):
    """Drop fairseq bookkeeping entries from ``state_dict`` in place.

    Renamed from the obfuscated ``_a`` to match the call site in the
    conversion function below.
    """
    ignore_keys = [
        '''encoder.version''',
        '''decoder.version''',
        '''model.encoder.version''',
        '''model.decoder.version''',
        '''decoder.output_projection.weight''',
        '''_float_tensor''',
        '''encoder.embed_positions._float_tensor''',
        '''decoder.embed_positions._float_tensor''',
    ]
    for k in ignore_keys:
        # Pop each key with a default so missing entries are ignored (the
        # obfuscated original popped the dict with itself — a bug).
        state_dict.pop(k, None)
def rename_keys(s_dict):
    """Rename fairseq parameter names to their transformers equivalents,
    in place.

    NOTE(review): the assignment targets were lost in obfuscation; restored
    following the upstream Speech2Text conversion script — confirm the
    replacement patterns against the fairseq checkpoint keys.
    """
    keys = list(s_dict.keys())
    for key in keys:
        if "transformer_layers" in key:
            s_dict[key.replace("s.transformer_layers", "s.layers")] = s_dict.pop(key)
        elif "subsample" in key:
            s_dict[key.replace("subsample", "conv")] = s_dict.pop(key)
def make_linear_from_emb(emb):
    """Build a bias-free nn.Linear sharing its weight tensor with the
    embedding ``emb`` (used to tie the LM head to the decoder embeddings).

    Renamed from the obfuscated ``_a`` to match the call site in the
    conversion function below.
    """
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    # Share (do not copy) the embedding weights with the linear layer.
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_sat_checkpoint_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    """Convert a fairseq speech-to-text checkpoint into a transformers
    Speech2Text model and save it to ``pytorch_dump_folder_path``.

    Renamed from the obfuscated ``_a`` to match the call in the __main__
    block; local names restored from the attributes/keys the original read.
    """
    mam_aaa = torch.load(checkpoint_path, map_location='''cpu''')
    args = mam_aaa['''args''']
    state_dict = mam_aaa['''model''']
    lm_head_weights = state_dict['''decoder.output_projection.weight''']

    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)

    vocab_size = state_dict['''decoder.embed_tokens.weight'''].shape[0]

    tie_embeds = args.share_decoder_input_output_embed

    conv_kernel_sizes = [int(i) for i in args.conv_kernel_sizes.split(''',''')]
    config = SpeechaTextConfig(
        vocab_size=vocab_size,
        max_source_positions=args.max_source_positions,
        max_target_positions=args.max_target_positions,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function='''relu''',
        num_conv_layers=len(conv_kernel_sizes),
        conv_channels=args.conv_channels,
        conv_kernel_sizes=conv_kernel_sizes,
        input_feat_per_channel=args.input_feat_per_channel,
        input_channels=args.input_channels,
        tie_word_embeddings=tie_embeds,
        num_beams=5,
        max_length=200,
        use_cache=True,
        decoder_start_token_id=2,
        early_stopping=True,
    )

    model = SpeechaTextForConditionalGeneration(config)
    # Only the positional-embedding weights are allowed to be missing.
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            '''Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'''
            f" but all the following weights are missing {missing}")

    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.lm_head.weight.data = lm_head_weights

    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('--fairseq_path', type=str, help='Path to the fairseq model (.pt) file.')
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
    convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
| 315 | 1 |
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def get_amazon_product_data(product: str = "laptop") -> DataFrame:
    """Scrape Amazon India search results for ``product`` and return a
    DataFrame with title, link, price, rating, MRP and discount columns.

    Renamed from the obfuscated ``snake_case_`` to match the call in the
    __main__ block; local names restored from the surviving references.
    """
    url = f"https://www.amazon.in/laptop/s?k={product}"
    header = {
        """User-Agent""": """Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
    (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36""",
        """Accept-Language""": """en-US, en;q=0.5""",
    }
    soup = BeautifulSoup(requests.get(url, headers=header).text)
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
        columns=[
            """Product Title""",
            """Product Link""",
            """Current Price of the product""",
            """Product Rating""",
            """MRP of the product""",
            """Discount""",
        ])
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            """div""", attrs={"""class""": """s-result-item""", """data-component-type""": """s-search-result"""},
        ), soup.find_all("""div""", attrs={"""class""": """a-row a-size-base a-color-base"""}), ):
        try:
            product_title = item.h2.text
            product_link = """https://www.amazon.in/""" + item.h2.a["""href"""]
            product_price = item.find("""span""", attrs={"""class""": """a-offscreen"""}).text
            try:
                product_rating = item.find("""span""", attrs={"""class""": """a-icon-alt"""}).text
            except AttributeError:
                product_rating = """Not available"""
            try:
                product_mrp = (
                    """₹"""
                    + item.find(
                        """span""", attrs={"""class""": """a-price a-text-price"""}).text.split("""₹""")[1]
                )
            except AttributeError:
                product_mrp = """"""
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip("""₹""").replace(""",""", """"""))
                            - float(product_price.strip("""₹""").replace(""",""", """"""))
                        )
                        / float(product_mrp.strip("""₹""").replace(""",""", """"""))
                    )
                    * 1_00)
            except ValueError:
                discount = float("""nan""")
        except AttributeError:
            pass
        # NOTE(review): the row-append targets were lost in obfuscation;
        # appending at position len(index) then shifting — confirm against
        # the intended output ordering.
        data_frame.loc[len(data_frame.index)] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        # Replace empty price/MRP cells with a single space placeholder.
        data_frame.loc[data_frame["""Current Price of the product"""] == """""", """Current Price of the product"""] = """ """
        data_frame.loc[data_frame["""MRP of the product"""] == """""", """MRP of the product"""] = """ """
        data_frame.index += 1
    return data_frame
if __name__ == "__main__":
    product = '''headphones'''
    get_amazon_product_data(product).to_csv(F'Amazon Product Data for {product}.csv')
| 149 |
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase):
    """Builds small RegNet configs and inputs for the model tests below.

    Renamed from the obfuscated ``__lowerCamelCase`` to match its use in
    ``FlaxRegNetModelTest.setUp``; duplicate parameter names (a SyntaxError)
    and the lost ``self.`` assignments are restored.
    """

    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],  # read-only defaults, never mutated
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        # NOTE(review): obfuscation lost which list this measured;
        # hidden_sizes and depths have the same length here — confirm.
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, image_size=self.image_size, )

    def create_and_check_model(self, config, pixel_values):
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)
        # Output shape (b, c, h, w)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """Common-mixin test suite for the Flax RegNet models.

    NOTE(review): obfuscation collapsed the class-attribute names and made
    every method shadow the previous one; restored to the conventional
    ``test_*`` layout. Confirm the three boolean flag names against
    FlaxModelTesterMixin.
    """

    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()

    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False

    def setUp(self) -> None:
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        # RegNet's config has no extra common properties to check.
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="""RegNet does not use inputs_embeds""")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="""RegNet does not support input and output embeddings""")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("""JIT Enabled"""):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("""JIT Disabled"""):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
def prepare_img():
    """Load the COCO fixture image used by the integration test below.

    Renamed from the obfuscated ``snake_case_`` to match the call site in the
    integration test; the original also returned an undefined name.
    """
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
    return image
@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
    """Slow integration test: run the pretrained checkpoint on a real image.

    Class and method names restored from obfuscation; ``default_image_processor``
    is grounded by its use in the test body.
    """

    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("""facebook/regnet-y-040""") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FlaxRegNetForImageClassification.from_pretrained("""facebook/regnet-y-040""")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="""np""")

        outputs = model(**inputs)

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])
        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 149 | 1 |
from __future__ import annotations
from math import gcd
def pollard_rho(num: int, seed: int = 2, step: int = 1, attempts: int = 3) -> int | None:
    """Return a nontrivial divisor of ``num`` using Pollard's rho algorithm,
    or None if no divisor is found within ``attempts`` attempts.

    Renamed from the obfuscated ``__snake_case`` to match the call in the
    __main__ block (which also passes ``attempts`` by keyword).

    Raises:
        ValueError: if ``num`` is less than 2.
    """
    if num < 2:
        raise ValueError("The input value cannot be less than 2")

    # Because of the relationship between ``f(f(x))`` and ``f(x)``, this
    # algorithm struggles to find factors that are divisible by two.
    # As a workaround, we specifically check for two and even inputs.
    # See: https://math.stackexchange.com/a/2856214/165820
    if num > 2 and num % 2 == 0:
        return 2

    # Pollard suggested ``f(x) = (x**2 - 1) % num``; we use
    # ``f(x) = (x**2 + C) % num`` where ``C`` (the step) can be changed
    # between attempts, since success depends on the seed and function.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus

    for _ in range(attempts):
        # These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed

        while True:
            # At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)

            # Once both enter a cycle whose length divides ``num``, the
            # position difference shares a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)
            if divisor == 1:
                # No common divisor yet, just keep searching.
                continue
            else:
                # We found a common divisor!
                if divisor == num:
                    # Unfortunately, the divisor is ``num`` itself and is useless.
                    break
                else:
                    # The divisor is a nontrivial factor of ``num``!
                    return divisor

        # This attempt failed: reseed deterministically from the hare's
        # position (as in Brent's variant) and bump the step value.
        seed = hare
        step += 1

    # We haven't found a divisor within the requested number of attempts.
    # We were unlucky or ``num`` itself is actually prime.
    return None
if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument(
        'num',
        type=int,
        help='The value to find a divisor of',
    )
    parser.add_argument(
        '--attempts',
        type=int,
        default=3,
        help='The number of attempts before giving up',
    )
    args = parser.parse_args()
    divisor = pollard_rho(args.num, attempts=args.attempts)

    if divisor is None:
        print(F"{args.num} is probably prime")
    else:
        quotient = args.num // divisor
        print(F"{args.num} = {divisor} * {quotient}")
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class _UpperCAmelCase(BaseOutput):
    """Output class for the text-to-video pipelines in this module.

    NOTE(review): obfuscation reduced the base class and the single field to
    unresolvable placeholders; restored the base as ``BaseOutput`` (imported
    above) and the field as ``frames`` following diffusers' convention —
    confirm the field name against the pipeline implementations.
    """

    # Denoised video frames: per-batch list of numpy arrays or a torch tensor.
    frames: Union[List[np.ndarray], torch.FloatTensor]
try:
    # The pipelines below need both transformers and torch; raise to fall back
    # to the auto-generated dummy objects when either is unavailable.
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    # Real pipeline implementations, exported when the dependencies exist.
    from .pipeline_text_to_video_synth import TextToVideoSDPipeline
    from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline  # noqa: F401
    from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 506 | 0 |
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
# Candidate summaries. NOTE(review): the obfuscated original assigned both
# lists to the same name, making the first one dead; restored as the
# conventional PRED/TGT pair used by the rouge tests below.
PRED = [
    "Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the"
    " final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe"
    " depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.",
    "The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"
    " accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's"
    " founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the"
    " body.",
    "Amnesty International releases its annual report on the death penalty. The report catalogs the use of"
    " state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"
    " world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"
    " punishment.",
]

# Reference summaries aligned with PRED.
TGT = [
    "Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ."
    " Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz"
    " had informed his Lufthansa training school of an episode of severe depression, airline says .",
    "Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."
    " Israel and the United States opposed the move, which could open the door to war crimes investigations against"
    " Israelis .",
    "Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"
    " death . Organization claims that governments around the world are using the threat of terrorism to advance"
    " executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"
    " sentences up by 28% .",
]
def test_disaggregated_scores_are_deterministic():
    """Per-sentence (non-aggregated) rouge2 scores must not depend on which
    other rouge keys were requested in the same call.

    Renamed from the obfuscated ``_A`` (five tests shared that name, so
    pytest could collect only one); local names restored from the surviving
    references in the final assertion.
    """
    no_aggregation = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2", "rougeL"])
    assert isinstance(no_aggregation, defaultdict)
    no_aggregation_just_ra = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2"])
    assert (
        pd.DataFrame(no_aggregation["rouge2"]).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_ra["rouge2"]).fmeasure.mean()
    )
def test_newline_cnn_improvement():
    """rougeLsum should improve when sentences are newline-separated."""
    k = """rougeLsum"""
    score = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=[k])[k]
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=[k])[k]
    assert score > score_no_sep
def test_newline_irrelevant_for_other_metrics():
    """rouge1/2/L must be identical with and without newline separation."""
    k = ["""rouge1""", """rouge2""", """rougeL"""]
    score_sep = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=k)
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=k)
    assert score_sep == score_no_sep
def test_single_sent_scores_dont_depend_on_newline_sep():
    """For single-sentence inputs, newline separation must not change scores."""
    hypotheses = [
        """Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.""",
        """Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .""",
    ]
    references = [
        """Margot Frank, died in 1945, a month earlier than previously thought.""",
        """Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"""
        """ the final seconds on board Flight 9525.""",
    ]
    assert calculate_rouge(hypotheses, references, newline_sep=True) == calculate_rouge(hypotheses, references, newline_sep=False)
def test_pegasus_newline():
    """Default newline handling should score pegasus-style '<n>' output
    higher than scoring with newline_sep disabled."""
    pred = [
        """\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" """
    ]
    tgt = [
        """ Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."""
    ]
    prev_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"], newline_sep=False)["""rougeLsum"""]
    new_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"])["""rougeLsum"""]
    assert new_score > prev_score
def _A ( ):
    # Smoke-tests calculate_rouge_path on the bundled WMT en-ro sample,
    # with and without bootstrap aggregation.
    a__ : Optional[int] = Path("examples/seq2seq/test_data/wmt_en_ro" )
    a__ : List[Any] = calculate_rouge_path(data_dir.joinpath("test.source" ) , data_dir.joinpath("test.target" ) )
    assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
    a__ : Union[str, Any] = calculate_rouge_path(
        data_dir.joinpath("test.source" ) , data_dir.joinpath("test.target" ) , bootstrap_aggregation=UpperCAmelCase__ )
    # NOTE(review): `data_dir` is never bound (assigned to `a__` above) and
    # the isinstance arguments are the undefined `UpperCAmelCase__` — the
    # names look mangled; confirm against the upstream test file.
    assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
| 708 |
import argparse
import hashlib
import io
import os
import urllib
import warnings

import torch
from torch import nn
from tqdm import tqdm

from transformers import WhisperConfig, WhisperForConditionalGeneration
# URL table of the official OpenAI Whisper checkpoints, keyed by size name.
# The second-to-last path segment of each URL is the file's SHA256 digest,
# which the download helper below verifies.
# NOTE(review): this table is read below as `_MODELS`, but is assigned to
# `SCREAMING_SNAKE_CASE__` here — the name looks mangled; confirm upstream.
SCREAMING_SNAKE_CASE__ : List[str] = {
    """tiny.en""": """https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt""",
    """tiny""": """https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt""",
    """base.en""": """https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt""",
    """base""": """https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt""",
    """small.en""": """https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt""",
    """small""": """https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt""",
    """medium.en""": """https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt""",
    """medium""": """https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt""",
    """large""": """https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt""",
    """large-v2""": """https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt""",
}
def _A ( lowerCamelCase ):
a__ : Optional[int] = ["layers", "blocks"]
for k in ignore_keys:
state_dict.pop(lowerCamelCase , lowerCamelCase )
# Substring-replacement table mapping OpenAI Whisper parameter-name
# fragments to their Hugging Face equivalents; applied by the key-renaming
# helper below.
# NOTE(review): this table is read below as `WHISPER_MAPPING`, but is
# assigned to `SCREAMING_SNAKE_CASE__` here (which also clobbers the model
# URL table above) — the name looks mangled; confirm upstream.
SCREAMING_SNAKE_CASE__ : List[str] = {
    """blocks""": """layers""",
    """mlp.0""": """fc1""",
    """mlp.2""": """fc2""",
    """mlp_ln""": """final_layer_norm""",
    """.attn.query""": """.self_attn.q_proj""",
    """.attn.key""": """.self_attn.k_proj""",
    """.attn.value""": """.self_attn.v_proj""",
    """.attn_ln""": """.self_attn_layer_norm""",
    """.attn.out""": """.self_attn.out_proj""",
    """.cross_attn.query""": """.encoder_attn.q_proj""",
    """.cross_attn.key""": """.encoder_attn.k_proj""",
    """.cross_attn.value""": """.encoder_attn.v_proj""",
    """.cross_attn_ln""": """.encoder_attn_layer_norm""",
    """.cross_attn.out""": """.encoder_attn.out_proj""",
    """decoder.ln.""": """decoder.layer_norm.""",
    """encoder.ln.""": """encoder.layer_norm.""",
    """token_embedding""": """embed_tokens""",
    """encoder.positional_embedding""": """encoder.embed_positions.weight""",
    """decoder.positional_embedding""": """decoder.embed_positions.weight""",
    """ln_post""": """layer_norm""",
}
def _A ( lowerCamelCase ):
a__ : Tuple = list(s_dict.keys() )
for key in keys:
a__ : Optional[Any] = key
for k, v in WHISPER_MAPPING.items():
if k in key:
a__ : Optional[int] = new_key.replace(lowerCamelCase , lowerCamelCase )
print(F"""{key} -> {new_key}""" )
a__ : Dict = s_dict.pop(lowerCamelCase )
return s_dict
def _A ( lowerCamelCase ):
a__ , a__ : Any = emb.weight.shape
a__ : Optional[Any] = nn.Linear(lowerCamelCase , lowerCamelCase , bias=lowerCamelCase )
a__ : Optional[Any] = emb.weight.data
return lin_layer
def _A ( url , root ):
    """Download a Whisper checkpoint into *root*, verifying its SHA256.

    Args:
        url: checkpoint URL; the second-to-last path segment is the
            expected SHA256 hex digest.
        root: directory the file is cached in (created if missing).

    Returns:
        The raw checkpoint bytes.

    Raises:
        RuntimeError: if the target path exists but is not a regular file,
            or the downloaded bytes fail the checksum.
    """
    # BUG FIX: both parameters were named `lowerCamelCase` (duplicate
    # argument names are a SyntaxError) and the hash calls referenced the
    # nonexistent `hashlib.shaaaa`; restored distinct names and sha256.
    os.makedirs(root , exist_ok=True )
    filename = os.path.basename(url )
    expected_shaaaa = url.split("/" )[-2]
    download_target = os.path.join(root , filename )
    if os.path.exists(download_target ) and not os.path.isfile(download_target ):
        raise RuntimeError(F"""{download_target} exists and is not a regular file""" )
    if os.path.isfile(download_target ):
        # Cached copy: reuse it if the checksum matches (close the handle
        # promptly instead of leaking it as the original did).
        with open(download_target , "rb" ) as cached:
            model_bytes = cached.read()
        if hashlib.sha256(model_bytes ).hexdigest() == expected_shaaaa:
            return model_bytes
        warnings.warn(F"""{download_target} exists, but the SHA256 checksum does not match; re-downloading the file""" )
    with urllib.request.urlopen(url ) as source, open(download_target , "wb" ) as output:
        with tqdm(
            total=int(source.info().get("Content-Length" ) ) , ncols=80 , unit="iB" , unit_scale=True , unit_divisor=1024 ) as loop:
            while True:
                buffer = source.read(8192 )
                if not buffer:
                    break
                output.write(buffer )
                loop.update(len(buffer ) )
    with open(download_target , "rb" ) as downloaded:
        model_bytes = downloaded.read()
    if hashlib.sha256(model_bytes ).hexdigest() != expected_shaaaa:
        # BUG FIX: duplicated "not" removed from the error message.
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model." )
    return model_bytes
def _A ( checkpoint_path , pytorch_dump_folder_path ):
    """Convert an OpenAI Whisper checkpoint into a HF Whisper model.

    Args:
        checkpoint_path: either a key of ``_MODELS`` (the checkpoint is
            downloaded) or a local ``.pt`` file path.
        pytorch_dump_folder_path: output directory for ``save_pretrained``
            (also used as the download cache directory).
    """
    # BUG FIX: both parameters were named `lowerCamelCase` (duplicate
    # argument names are a SyntaxError) and most locals were collapsed to
    # `a__`; names restored from how the values are used below.
    if ".pt" not in checkpoint_path:
        # NOTE(review): `_download` returns raw bytes, so the checkpoint is
        # deserialized from an in-memory buffer; the original call passed
        # no cache directory at all — the dump folder is reused here.
        # Confirm against the upstream conversion script.
        model_bytes = _download(_MODELS[checkpoint_path] , pytorch_dump_folder_path )
        original_checkpoint = torch.load(io.BytesIO(model_bytes ) , map_location="cpu" )
    else:
        original_checkpoint = torch.load(checkpoint_path , map_location="cpu" )
    dimensions = original_checkpoint["dims"]
    state_dict = original_checkpoint["model_state_dict"]
    # Keep the original output-projection weights before the keys are
    # renamed, in case the embeddings are not tied.
    proj_out_weights = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(state_dict )
    rename_keys(state_dict )
    tie_embeds = True
    ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0]
    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"] ,
        encoder_ffn_dim=ffn_dim ,
        decoder_ffn_dim=ffn_dim ,
        num_mel_bins=dimensions["n_mels"] ,
        d_model=dimensions["n_audio_state"] ,
        max_target_positions=dimensions["n_text_ctx"] ,
        encoder_layers=dimensions["n_audio_layer"] ,
        encoder_attention_heads=dimensions["n_audio_head"] ,
        decoder_layers=dimensions["n_text_layer"] ,
        # BUG FIX: was `dimensions["n_text_state"]` — the head count, not
        # the hidden size, configures the decoder attention heads.
        decoder_attention_heads=dimensions["n_text_head"] ,
        max_source_positions=dimensions["n_audio_ctx"] ,
    )
    model = WhisperForConditionalGeneration(config )
    missing , unexpected = model.model.load_state_dict(state_dict , strict=False )
    if len(missing ) > 0 and not set(missing ) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            F""" but all the following weights are missing {missing}""" )
    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens )
    else:
        # presumably the untied projection keeps the original embedding
        # weights — the mangled original lost the assignment target;
        # TODO confirm against upstream.
        model.proj_out.weight.data = proj_out_weights
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    # BUG FIX: the parser was assigned to `SCREAMING_SNAKE_CASE__` but used
    # as `parser`, and the parsed namespace similarly used as `args`;
    # restored consistent names. Also fixed the "Patht" typo in the help.
    # NOTE(review): `convert_openai_whisper_to_tfms` is not defined in this
    # file as written (the conversion function above is named `_A`) — the
    # definition name looks mangled; confirm against upstream.
    parser = argparse.ArgumentParser()
    # # Required parameters
    parser.add_argument("""--checkpoint_path""", type=str, help="""Path to the downloaded checkpoints""")
    parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    args = parser.parse_args()
    convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 629 | 0 |
def a__ (string: str, separator: str = " ") -> list:
    """Split *string* on every occurrence of *separator*.

    Args:
        string: the text to split.
        separator: single-character separator (defaults to a space).

    Returns:
        List of fields. An empty string yields ``[]``, and — unlike
        ``str.split`` — a trailing separator does not produce a trailing
        empty field.
    """
    # BUG FIX: both parameters were named `__lowercase` (duplicate argument
    # names are a SyntaxError) while the body read `string`/`separator`;
    # the parameter names the body expects are restored.
    split_words = []
    last_index = 0
    for index, char in enumerate(string):
        if char == separator:
            split_words.append(string[last_index:index])
            last_index = index + 1
        elif index + 1 == len(string):
            # Last character: flush whatever remains of the final field.
            split_words.append(string[last_index : index + 1])
    return split_words
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 206 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration file.
# NOTE(review): the logger and the archive map below are assigned to the
# SAME mangled name `_UpperCamelCase`, so the second assignment clobbers
# the first — upstream these are `logger` and
# `SEW_PRETRAINED_CONFIG_ARCHIVE_MAP`; confirm.
_UpperCamelCase : List[Any] =logging.get_logger(__name__)
# Pretrained-config archive map for SEW checkpoints.
_UpperCamelCase : Dict ={
    'asapp/sew-tiny-100k': 'https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json',
    # See all SEW models at https://huggingface.co/models?filter=sew
}
class UpperCAmelCase__ ( __snake_case ):
    """Configuration for SEW (Squeezed and Efficient Wav2vec) models.

    Stores hyper-parameters for the convolutional feature extractor, the
    squeezed transformer encoder, SpecAugment masking, and the CTC /
    sequence-classification heads.

    BUG FIX: every ``__init__`` parameter was named ``A__`` (duplicate
    argument names are a SyntaxError) and the body read names that were
    never bound; parameter names are restored from the attributes the body
    assigns. The ``model_type`` class attribute was likewise mangled to
    ``__snake_case``.
    """

    model_type = "sew"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1E-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        # Feature-extractor (conv front-end) hyper-parameters.
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        # Transformer-encoder hyper-parameters.
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        # The three conv specs must describe the same number of layers.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect."
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)"
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`." )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def A__ (self):
        """Total stride of the convolutional feature extractor."""
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 206 | 1 |
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class UpperCAmelCase ( unittest.TestCase ):
    # Builds small random BertConfig + input tensors for the Flax BERT test
    # suite below.
    # NOTE(review): several names in this class look machine-mangled — the
    # __init__ signature repeats `__UpperCamelCase` (a SyntaxError), the
    # methods all share the mangled name `_A`, and bodies read names that
    # were never bound (`config_and_inputs`, `_A`). Flagged inline; confirm
    # against the upstream test file.
    def __init__( self: Tuple , __UpperCamelCase: Optional[int] , __UpperCamelCase: Tuple=13 , __UpperCamelCase: Union[str, Any]=7 , __UpperCamelCase: Optional[Any]=True , __UpperCamelCase: List[str]=True , __UpperCamelCase: List[Any]=True , __UpperCamelCase: Tuple=True , __UpperCamelCase: Optional[Any]=99 , __UpperCamelCase: int=32 , __UpperCamelCase: Tuple=5 , __UpperCamelCase: Any=4 , __UpperCamelCase: str=37 , __UpperCamelCase: int="gelu" , __UpperCamelCase: str=0.1 , __UpperCamelCase: Optional[Any]=0.1 , __UpperCamelCase: Optional[Any]=512 , __UpperCamelCase: Optional[int]=16 , __UpperCamelCase: Dict=2 , __UpperCamelCase: List[str]=0.0_2 , __UpperCamelCase: int=4 , ):
        # NOTE(review): the right-hand names below (parent, batch_size, …)
        # are never bound as parameters because every parameter is the
        # duplicated `__UpperCamelCase`.
        _a = parent
        _a = batch_size
        _a = seq_length
        _a = is_training
        _a = use_attention_mask
        _a = use_token_type_ids
        _a = use_labels
        _a = vocab_size
        _a = hidden_size
        _a = num_hidden_layers
        _a = num_attention_heads
        _a = intermediate_size
        _a = hidden_act
        _a = hidden_dropout_prob
        _a = attention_probs_dropout_prob
        _a = max_position_embeddings
        _a = type_vocab_size
        _a = type_sequence_label_size
        _a = initializer_range
        _a = num_choices
    def _A ( self: Dict ):
        # Builds (config, input_ids, token_type_ids, attention_mask) with
        # random ids sized by the tester's dimensions.
        _a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        _a = None
        if self.use_attention_mask:
            _a = random_attention_mask([self.batch_size, self.seq_length] )
        _a = None
        if self.use_token_type_ids:
            _a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        # NOTE(review): `is_decoder=_A` passes the (undefined) global `_A`,
        # not a boolean — looks mangled.
        _a = BertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_A , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask
    def _A ( self: Optional[Any] ):
        # Packs the prepared config/inputs into the common dict shape used
        # by the shared model-tester mixin.
        _a = self.prepare_config_and_inputs()
        _a = config_and_inputs
        _a = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
        return config, inputs_dict
    def _A ( self: List[str] ):
        # Variant used for decoder tests: adds encoder hidden states and an
        # encoder attention mask.
        _a = self.prepare_config_and_inputs()
        _a = config_and_inputs
        _a = True
        _a = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        _a = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class UpperCAmelCase ( snake_case__ , unittest.TestCase ):
    # Flax BERT model test-suite entry point: lists every Flax BERT head to
    # run through the shared FlaxModelTesterMixin, plus one slow smoke test.
    # NOTE(review): class-attribute names were mangled to `a`, setUp-style
    # methods to `_A`, and `FlaxBertModelTester` is not defined in this file
    # (the tester class above is also named `UpperCAmelCase`); confirm
    # against the upstream test file.
    a: Optional[int] = True
    a: Any = (
        (
            FlaxBertModel,
            FlaxBertForPreTraining,
            FlaxBertForMaskedLM,
            FlaxBertForMultipleChoice,
            FlaxBertForQuestionAnswering,
            FlaxBertForNextSentencePrediction,
            FlaxBertForSequenceClassification,
            FlaxBertForTokenClassification,
            FlaxBertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )
    def _A ( self: Dict ):
        # presumably setUp: instantiate the model tester — TODO confirm.
        _a = FlaxBertModelTester(self )
    @slow
    def _A ( self: Tuple ):
        # Only check this for base model, not necessary for all model classes.
        # This will also help speed-up tests.
        _a = FlaxBertModel.from_pretrained('''bert-base-cased''' )
        _a = model(np.ones((1, 1) ) )
        self.assertIsNotNone(_A )
| 711 |
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# Module logger, the SentencePiece word-boundary marker, and the standard
# tokenizer resource maps (vocab file name, download URLs, max input sizes,
# per-checkpoint init kwargs) for the XLM-ProphetNet tokenizer below.
# NOTE(review): all constants share the mangled name `lowerCamelCase`;
# upstream these are logger / SPIECE_UNDERLINE / VOCAB_FILES_NAMES /
# PRETRAINED_VOCAB_FILES_MAP / PRETRAINED_INIT_CONFIGURATION /
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES — confirm.
lowerCamelCase :Union[str, Any] = logging.get_logger(__name__)
lowerCamelCase :Union[str, Any] = '▁'
lowerCamelCase :Optional[Any] = {'vocab_file': 'prophetnet.tokenizer'}
lowerCamelCase :Union[str, Any] = {
    'vocab_file': {
        'microsoft/xprophetnet-large-wiki100-cased': (
            'https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer'
        ),
    }
}
lowerCamelCase :str = {
    'microsoft/xprophetnet-large-wiki100-cased': {'do_lower_case': False},
}
lowerCamelCase :Union[str, Any] = {
    'microsoft/xprophetnet-large-wiki100-cased': 512,
}
def __snake_case ( _UpperCamelCase ) -> Dict:
_a = collections.OrderedDict()
with open(_UpperCamelCase , '''r''' , encoding='''utf-8''' ) as reader:
_a = reader.readlines()
for index, token in enumerate(_UpperCamelCase ):
_a = token.rstrip('''\n''' )
_a = index
return vocab
class UpperCAmelCase ( __snake_case ):
    """SentencePiece-based tokenizer (XLM-ProphetNet style).

    Loads a ``prophetnet.tokenizer`` SentencePiece model and offsets its
    ids by ``fairseq_offset`` (12) to make room for fairseq-style special
    and ``[unused*]`` tokens.

    NOTE(review): several method signatures below repeat the parameter
    name ``__UpperCamelCase`` (duplicate argument names are a SyntaxError)
    and some statements assign the same throwaway name twice where two
    different targets were clearly intended — the file looks
    machine-mangled; the worst spots are flagged inline.
    """
    # NOTE(review): four class attributes all named `a` — each assignment
    # clobbers the previous one; upstream these are vocab_files_names,
    # pretrained_vocab_files_map, max_model_input_sizes, model_input_names.
    a: Any = VOCAB_FILES_NAMES
    a: str = PRETRAINED_VOCAB_FILES_MAP
    a: Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    a: str = ["input_ids", "attention_mask"]
    def __init__( self: Optional[Any] , __UpperCamelCase: int , __UpperCamelCase: List[Any]="[SEP]" , __UpperCamelCase: Optional[int]="[SEP]" , __UpperCamelCase: List[str]="[SEP]" , __UpperCamelCase: Optional[Any]="[UNK]" , __UpperCamelCase: Any="[PAD]" , __UpperCamelCase: str="[CLS]" , __UpperCamelCase: Tuple="[MASK]" , __UpperCamelCase: Optional[Dict[str, Any]] = None , **__UpperCamelCase: str , ):
        # Load the SentencePiece model and set up fairseq-compatible
        # special-token id mappings.
        _a = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , sep_token=__UpperCamelCase , unk_token=__UpperCamelCase , pad_token=__UpperCamelCase , cls_token=__UpperCamelCase , mask_token=__UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCamelCase , )
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                '''You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece'''
                ''' pip install sentencepiece''' )
            raise
        _a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(__UpperCamelCase ) )
        _a = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
        # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
        # put special tokens and [unused] tokens into the vocab
        _a = {'''[PAD]''': 0, '''[CLS]''': 1, '''[SEP]''': 2, '''[UNK]''': 3, '''[MASK]''': 4}
        for i in range(10 ):
            # NOTE(review): both assignments below target the same throwaway
            # local — upstream this inserts f"[unused{i}]" -> 5 + i into
            # fairseq_tokens_to_ids; as written the loop has no effect.
            _a = f"[unused{i}]"
            _a = 5 + i
        # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
        _a = 12
        _a = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        for k in self.fairseq_tokens_to_ids.keys():
            self.unique_no_split_tokens.append(__UpperCamelCase )
    def __getstate__( self: Tuple ):
        # Drop the (unpicklable) SentencePiece processor when pickling.
        _a = self.__dict__.copy()
        _a = None
        return state
    def __setstate__( self: Optional[Any] , __UpperCamelCase: int ):
        # Restore state and re-load the SentencePiece model from disk.
        _a = d
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                '''You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece'''
                ''' pip install sentencepiece''' )
            raise
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            _a = {}
        _a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def _A ( self: Any , __UpperCamelCase: List[int] , __UpperCamelCase: Optional[List[int]] = None , __UpperCamelCase: bool = False ):
        # get_special_tokens_mask: 1 marks special tokens, 0 marks sequence
        # tokens (a single trailing [SEP] per sequence).
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=__UpperCamelCase , token_ids_a=__UpperCamelCase , already_has_special_tokens=__UpperCamelCase )
        if token_ids_a is None:
            return ([0] * len(__UpperCamelCase )) + [1]
        return ([0] * len(__UpperCamelCase )) + [1] + ([0] * len(__UpperCamelCase )) + [1]
    def _A ( self: Any , __UpperCamelCase: List[int] , __UpperCamelCase: Optional[List[int]] = None ):
        # create_token_type_ids_from_sequences: ProphetNet uses a single
        # all-zeros segment-id sequence.
        _a = [self.sep_token_id]
        if token_ids_a is None:
            return len(token_ids_a + sep ) * [0]
        return len(token_ids_a + sep + sep + token_ids_a + sep ) * [0]
    @property
    def _A ( self: Optional[Any] ):
        # vocab_size: SentencePiece pieces plus the fairseq id offset.
        return len(self.sp_model ) + self.fairseq_offset
    def _A ( self: Dict ):
        # get_vocab: token -> id for the full (offset) vocabulary, plus any
        # added tokens.
        _a = {self.convert_ids_to_tokens(__UpperCamelCase ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def _A ( self: str , __UpperCamelCase: str ):
        # _tokenize: delegate to SentencePiece.
        return self.sp_model.encode(__UpperCamelCase , out_type=__UpperCamelCase )
    def _A ( self: Dict , __UpperCamelCase: List[str] ):
        # _convert_token_to_id: special tokens first, then SentencePiece
        # with the fairseq offset; piece id 0 means unknown.
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        _a = self.sp_model.PieceToId(__UpperCamelCase )
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def _A ( self: Tuple , __UpperCamelCase: Optional[int] ):
        # _convert_id_to_token: inverse of the mapping above.
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def _A ( self: List[Any] , __UpperCamelCase: str ):
        # convert_tokens_to_string: join pieces and replace the SentencePiece
        # word-boundary marker with a space.
        # NOTE(review): `.replace(__UpperCamelCase , ' ')` replaces the
        # argument with a space — upstream replaces SPIECE_UNDERLINE; looks
        # mangled.
        _a = ''''''.join(__UpperCamelCase ).replace(__UpperCamelCase , ''' ''' ).strip()
        return out_string
    def _A ( self: Dict , __UpperCamelCase: str , __UpperCamelCase: Optional[str] = None ):
        # save_vocabulary: copy (or serialize) the SentencePiece model into
        # the target directory and return the written path.
        if not os.path.isdir(__UpperCamelCase ):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
            return
        _a = os.path.join(
            __UpperCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCamelCase ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , __UpperCamelCase )
        elif not os.path.isfile(self.vocab_file ):
            with open(__UpperCamelCase , '''wb''' ) as fi:
                _a = self.sp_model.serialized_model_proto()
                fi.write(__UpperCamelCase )
        return (out_vocab_file,)
    def _A ( self: List[Any] , __UpperCamelCase: List[int] , __UpperCamelCase: Optional[List[int]] = None ):
        # build_inputs_with_special_tokens: `A [SEP]` or `A [SEP] B [SEP]`.
        if token_ids_a is None:
            return token_ids_a + [self.sep_token_id]
        _a = [self.sep_token_id]
        return token_ids_a + sep + token_ids_a + sep
| 346 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger and pretrained-config archive map for Swin2SR checkpoints.
# NOTE(review): both constants share the mangled name `lowerCAmelCase_`, so
# the archive map clobbers the logger — upstream these are `logger` and
# `SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP`; confirm.
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
    'caidas/swin2sr-classicalsr-x2-64': (
        'https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json'
    ),
}
class __A ( A_ ):
    """Configuration for a Swin2SR image super-resolution model.

    Stores the Swin-v2 backbone hyper-parameters (patching, embedding,
    per-stage depths/heads, windowed attention) plus the super-resolution
    head settings (upscale factor, residual connection, upsampler type).

    BUG FIX: every ``__init__`` parameter was named ``_snake_case``
    (duplicate argument names are a SyntaxError) and the body read names
    that were never bound; parameter names are restored from the
    attributes the body assigns. The two class attributes were both
    mangled to ``lowerCAmelCase`` (the second clobbered the first) and are
    restored to ``model_type`` / ``attribute_map``.
    """

    model_type = "swin2sr"
    attribute_map = {
        "hidden_size": "embed_dim",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=64,
        patch_size=1,
        num_channels=3,
        embed_dim=180,
        depths=[6, 6, 6, 6, 6, 6],
        num_heads=[6, 6, 6, 6, 6, 6],
        window_size=8,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        upscale=2,
        img_range=1.0,
        resi_connection="1conv",
        upsampler="pixelshuffle",
        **kwargs,
    ):
        super().__init__(**kwargs )
        # Backbone (Swin-v2) hyper-parameters.
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        # One transformer stage per entry in `depths`.
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # Super-resolution head settings.
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
| 560 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
class __A ( A_ ):
    """Composite configuration pairing an encoder config with a decoder
    config (encoder-decoder model).

    NOTE(review): the method bodies below mix the mangled parameter name
    ``_snake_case`` with upstream names (``kwargs``, ``encoder_config``,
    ``decoder_config``) that are never bound, and several assignments
    target the same throwaway local — the file looks machine-mangled;
    confirm against the upstream ``EncoderDecoderConfig``.
    """
    # NOTE(review): both class attributes are assigned to the same mangled
    # name `lowerCAmelCase` — upstream these are `model_type` and
    # `is_composition`; the second assignment clobbers the first.
    lowerCAmelCase : int = "encoder-decoder"
    lowerCAmelCase : int = True
    def __init__( self : Optional[int] ,**_snake_case : Tuple ) -> Tuple:
        """Build from `encoder` and `decoder` sub-config dicts in kwargs."""
        super().__init__(**_snake_case )
        # NOTE(review): `assert` is stripped under -O; upstream should
        # raise. Also `kwargs` is not a bound name here (the parameter is
        # `_snake_case`).
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        _lowercase_unused = None  # (placeholder comment removed below)
        lowercase__ : Tuple = kwargs.pop('''encoder''' )
        lowercase__ : Dict = encoder_config.pop('''model_type''' )
        lowercase__ : Union[str, Any] = kwargs.pop('''decoder''' )
        lowercase__ : Any = decoder_config.pop('''model_type''' )
        from ..auto.configuration_auto import AutoConfig
        lowercase__ : Optional[int] = AutoConfig.for_model(_snake_case ,**_snake_case )
        lowercase__ : Dict = AutoConfig.for_model(_snake_case ,**_snake_case )
        lowercase__ : List[str] = True
    @classmethod
    def UpperCAmelCase ( cls : str ,_snake_case : PretrainedConfig ,_snake_case : PretrainedConfig ,**_snake_case : Optional[Any] ) -> PretrainedConfig:
        """Alternate constructor from two already-built sub-configs
        (upstream: ``from_encoder_decoder_configs``)."""
        logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' )
        lowercase__ : Dict = True
        lowercase__ : Any = True
        return cls(encoder=encoder_config.to_dict() ,decoder=decoder_config.to_dict() ,**_snake_case )
    def UpperCAmelCase ( self : Tuple ) -> Optional[int]:
        """Serialize to a plain dict, expanding the nested sub-configs
        (upstream: ``to_dict``)."""
        lowercase__ : List[Any] = copy.deepcopy(self.__dict__ )
        lowercase__ : List[Any] = self.encoder.to_dict()
        lowercase__ : List[Any] = self.decoder.to_dict()
        lowercase__ : Optional[int] = self.__class__.model_type
        return output
| 560 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
# Standard tokenizer resource tables for ELECTRA: expected vocab/tokenizer
# file names, per-checkpoint download URLs, max input sizes, and init
# kwargs.
# NOTE(review): all four constants share the mangled name `_snake_case`;
# upstream these are VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP /
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES / PRETRAINED_INIT_CONFIGURATION,
# which the class below references — confirm.
_snake_case : Optional[int] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
_snake_case : Optional[int] = {
    'vocab_file': {
        'google/electra-small-generator': (
            'https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt'
        ),
        'google/electra-base-generator': 'https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt',
        'google/electra-large-generator': (
            'https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt'
        ),
        'google/electra-small-discriminator': (
            'https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt'
        ),
        'google/electra-base-discriminator': (
            'https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt'
        ),
        'google/electra-large-discriminator': (
            'https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt'
        ),
    },
    'tokenizer_file': {
        'google/electra-small-generator': (
            'https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json'
        ),
        'google/electra-base-generator': (
            'https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json'
        ),
        'google/electra-large-generator': (
            'https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json'
        ),
        'google/electra-small-discriminator': (
            'https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json'
        ),
        'google/electra-base-discriminator': (
            'https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json'
        ),
        'google/electra-large-discriminator': (
            'https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json'
        ),
    },
}
_snake_case : int = {
    'google/electra-small-generator': 5_12,
    'google/electra-base-generator': 5_12,
    'google/electra-large-generator': 5_12,
    'google/electra-small-discriminator': 5_12,
    'google/electra-base-discriminator': 5_12,
    'google/electra-large-discriminator': 5_12,
}
_snake_case : Union[str, Any] = {
    'google/electra-small-generator': {'do_lower_case': True},
    'google/electra-base-generator': {'do_lower_case': True},
    'google/electra-large-generator': {'do_lower_case': True},
    'google/electra-small-discriminator': {'do_lower_case': True},
    'google/electra-base-discriminator': {'do_lower_case': True},
    'google/electra-large-discriminator': {'do_lower_case': True},
}
class __SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
    """Fast (tokenizers-backed) ELECTRA tokenizer, WordPiece-based.

    NOTE(review): every method signature below repeats the parameter name
    ``_a`` (duplicate argument names are a SyntaxError), and where two
    sequences were intended (``token_ids_0``/``token_ids_1``) both read
    ``token_ids_a`` — the file looks machine-mangled; confirm against the
    upstream ``ElectraTokenizerFast``.
    """
    SCREAMING_SNAKE_CASE__ =VOCAB_FILES_NAMES
    SCREAMING_SNAKE_CASE__ =PRETRAINED_VOCAB_FILES_MAP
    SCREAMING_SNAKE_CASE__ =PRETRAINED_INIT_CONFIGURATION
    SCREAMING_SNAKE_CASE__ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    SCREAMING_SNAKE_CASE__ =ElectraTokenizer
    def __init__( self, _a=None, _a=None, _a=True, _a="[UNK]", _a="[SEP]", _a="[PAD]", _a="[CLS]", _a="[MASK]", _a=True, _a=None, **_a, ) -> List[Any]:
        # Initialize the fast tokenizer, then re-build the backend
        # normalizer if the requested lowercase/strip-accents/Chinese-chars
        # settings differ from what the serialized tokenizer carries.
        super().__init__(
            _a, tokenizer_file=_a, do_lower_case=_a, unk_token=_a, sep_token=_a, pad_token=_a, cls_token=_a, mask_token=_a, tokenize_chinese_chars=_a, strip_accents=_a, **_a, )
        __SCREAMING_SNAKE_CASE = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase", _a ) != do_lower_case
            or normalizer_state.get("strip_accents", _a ) != strip_accents
            or normalizer_state.get("handle_chinese_chars", _a ) != tokenize_chinese_chars
        ):
            __SCREAMING_SNAKE_CASE = getattr(_a, normalizer_state.pop("type" ) )
            __SCREAMING_SNAKE_CASE = do_lower_case
            __SCREAMING_SNAKE_CASE = strip_accents
            __SCREAMING_SNAKE_CASE = tokenize_chinese_chars
            __SCREAMING_SNAKE_CASE = normalizer_class(**_a )
        __SCREAMING_SNAKE_CASE = do_lower_case
    def __lowerCAmelCase ( self, _a, _a=None ) -> str:
        # build_inputs_with_special_tokens:
        # `[CLS] A [SEP]` or `[CLS] A [SEP] B [SEP]`.
        __SCREAMING_SNAKE_CASE = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_a:
            output += token_ids_a + [self.sep_token_id]
        return output
    def __lowerCAmelCase ( self, _a, _a = None ) -> List[int]:
        # create_token_type_ids_from_sequences: 0s for the first segment
        # (incl. [CLS]/[SEP]), 1s for the second.
        __SCREAMING_SNAKE_CASE = [self.sep_token_id]
        __SCREAMING_SNAKE_CASE = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
    def __lowerCAmelCase ( self, _a, _a = None ) -> Tuple[str]:
        # save_vocabulary: delegate to the backend tokenizer's model.
        __SCREAMING_SNAKE_CASE = self._tokenizer.model.save(_a, name=_a )
        return tuple(_a )
| 214 |
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class __SCREAMING_SNAKE_CASE :
    """Model tester that builds small random LEDConfig inputs for the
    TF LED test suite.

    NOTE(review): the ``__init__`` signature repeats the parameter name
    ``_a`` (duplicate argument names are a SyntaxError) and the bodies read
    names that are never bound (``parent``, ``batch_size``, …) — the file
    looks machine-mangled; confirm against the upstream test file.
    """
    SCREAMING_SNAKE_CASE__ =LEDConfig
    SCREAMING_SNAKE_CASE__ ={}
    SCREAMING_SNAKE_CASE__ ="""gelu"""
    def __init__( self, _a, _a=13, _a=7, _a=True, _a=False, _a=99, _a=32, _a=2, _a=4, _a=37, _a=0.1, _a=0.1, _a=20, _a=2, _a=1, _a=0, _a=4, ) -> Optional[int]:
        __SCREAMING_SNAKE_CASE = parent
        __SCREAMING_SNAKE_CASE = batch_size
        __SCREAMING_SNAKE_CASE = seq_length
        __SCREAMING_SNAKE_CASE = is_training
        __SCREAMING_SNAKE_CASE = use_labels
        __SCREAMING_SNAKE_CASE = vocab_size
        __SCREAMING_SNAKE_CASE = hidden_size
        __SCREAMING_SNAKE_CASE = num_hidden_layers
        __SCREAMING_SNAKE_CASE = num_attention_heads
        __SCREAMING_SNAKE_CASE = intermediate_size
        __SCREAMING_SNAKE_CASE = hidden_dropout_prob
        __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
        __SCREAMING_SNAKE_CASE = max_position_embeddings
        __SCREAMING_SNAKE_CASE = eos_token_id
        __SCREAMING_SNAKE_CASE = pad_token_id
        __SCREAMING_SNAKE_CASE = bos_token_id
        __SCREAMING_SNAKE_CASE = attention_window
        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        __SCREAMING_SNAKE_CASE = self.attention_window + 2
        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        __SCREAMING_SNAKE_CASE = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )
    def __lowerCAmelCase ( self ) -> List[str]:
        # prepare_config_and_inputs_for_common: random ids ending in EOS,
        # a matching LEDConfig, the common inputs dict, and a global
        # attention mask that attends to the final position.
        __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size )
        __SCREAMING_SNAKE_CASE = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ), 1 )
        __SCREAMING_SNAKE_CASE = tf.concat([input_ids, eos_tensor], axis=1 )
        __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
        __SCREAMING_SNAKE_CASE = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, attention_window=self.attention_window, **self.config_updates, )
        __SCREAMING_SNAKE_CASE = prepare_led_inputs_dict(_a, _a, _a )
        __SCREAMING_SNAKE_CASE = tf.concat(
            [tf.zeros_like(_a )[:, :-1], tf.ones_like(_a )[:, -1:]], axis=-1, )
        __SCREAMING_SNAKE_CASE = global_attention_mask
        return config, inputs_dict
    def __lowerCAmelCase ( self, _a, _a ) -> List[str]:
        # check_decoder_model_past_large_inputs: verifies that decoding with
        # cached past_key_values matches a full forward pass on a random
        # output slice.
        __SCREAMING_SNAKE_CASE = TFLEDModel(config=_a ).get_decoder()
        __SCREAMING_SNAKE_CASE = inputs_dict["input_ids"]
        __SCREAMING_SNAKE_CASE = input_ids[:1, :]
        __SCREAMING_SNAKE_CASE = inputs_dict["attention_mask"][:1, :]
        __SCREAMING_SNAKE_CASE = 1
        # first forward pass
        __SCREAMING_SNAKE_CASE = model(_a, attention_mask=_a, use_cache=_a )
        __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        __SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3), config.vocab_size )
        __SCREAMING_SNAKE_CASE = tf.cast(ids_tensor((self.batch_size, 3), 2 ), tf.inta )
        # append to next input_ids and
        __SCREAMING_SNAKE_CASE = tf.concat([input_ids, next_tokens], axis=-1 )
        __SCREAMING_SNAKE_CASE = tf.concat([attention_mask, next_attn_mask], axis=-1 )
        __SCREAMING_SNAKE_CASE = model(_a, attention_mask=_a )[0]
        __SCREAMING_SNAKE_CASE = model(_a, attention_mask=_a, past_key_values=_a )[0]
        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1] )
        # select random slice
        __SCREAMING_SNAKE_CASE = int(ids_tensor((1,), output_from_past.shape[-1] ) )
        __SCREAMING_SNAKE_CASE = output_from_no_past[:, -3:, random_slice_idx]
        __SCREAMING_SNAKE_CASE = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(_a, _a, rtol=1E-3 )
def prepare_led_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
):
    """Assemble the standard TFLED input dict, deriving any mask not supplied.

    - ``attention_mask``: 1 for every non-pad token of ``input_ids``.
    - ``decoder_attention_mask``: first position always attended, rest non-pad.
    - head masks default to all-ones (no head pruning).

    NOTE(review): the obfuscated original declared all seven parameters with the
    same name (a SyntaxError) and was renamed to ``_A`` while the tests call
    ``prepare_led_inputs_dict``; names are restored from the return-dict keys
    and the call sites.
    """
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                # Always attend to the first decoder position (decoder start token).
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }
@require_tf
class TFLEDModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common TF model tests for LED.

    NOTE(review): the obfuscation collapsed all class attributes to one name and
    all methods to one name (each definition shadowed the previous, so only the
    last survived). Attribute and method names are restored to the conventional
    TF test-class layout; the mixin base classes and `TFLEDModelTester`/`LEDConfig`
    references are reconstructed — confirm against the file's imports.
    """

    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFLEDForConditionalGeneration,
            "feature-extraction": TFLEDModel,
            "summarization": TFLEDForConditionalGeneration,
            "text2text-generation": TFLEDForConditionalGeneration,
            "translation": TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_headmasking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFLEDModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LEDConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        inputs_dict["global_attention_mask"] = tf.zeros_like(inputs_dict["attention_mask"])
        num_global_attn_indices = 2
        # Enable global attention on the first `num_global_attn_indices` positions.
        inputs_dict["global_attention_mask"] = tf.where(
            tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices,
            1,
            inputs_dict["global_attention_mask"],
        )

        config.return_dict = True
        seq_length = self.model_tester.seq_length
        encoder_seq_length = self.model_tester.encoder_seq_length

        def check_decoder_attentions_output(outputs):
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [t.numpy() for t in outputs.encoder_attentions]
            global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertEqual(len(global_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["use_cache"] = False  # reconstructed — TODO confirm second flag
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)

    @unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing.")
    def test_saved_model_creation(self):
        pass

    def test_generate_with_headmasking(self):
        # TODO: Head-masking not yet implement
        pass
def _long_tensor(tok_lst):
    """Wrap a nested Python int list as an integer tf constant.

    NOTE(review): this helper was obfuscated to `_A` (shadowing the earlier `_A`)
    while the integration tests below call `_long_tensor`; the name is restored
    to match. The mangled dtype `tf.intaa` is restored as tf.int32 per the
    sibling TF tests — TODO confirm.
    """
    return tf.constant(tok_lst, dtype=tf.int32)


# Absolute tolerance used by the slow integration checks below
# (was `_snake_case : int = 1e-4`, an `int` annotation on a float).
TOLERANCE = 1e-4
@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
    """Slow integration checks against the pretrained allenai/led-base-16384 checkpoint.

    NOTE(review): both test methods were obfuscated to the same name, so the
    second definition shadowed the first and only one test could ever run;
    distinct names are restored.
    """

    def test_inference_no_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384").led
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, 768)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3)

    def test_inference_with_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, model.config.vocab_size)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3, rtol=1e-3)
| 214 | 1 |
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
UpperCamelCase = "\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",\n author = \"Lin, Chin-Yew and\n Och, Franz Josef\",\n booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",\n month = \"aug 23{--}aug 27\",\n year = \"2004\",\n address = \"Geneva, Switzerland\",\n publisher = \"COLING\",\n url = \"https://www.aclweb.org/anthology/C04-1072\",\n pages = \"501--507\",\n}\n"
UpperCamelCase = "\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,\nthe better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n"
UpperCamelCase = "\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n 'bleu': bleu score,\n 'precisions': geometric mean of n-gram precisions,\n 'brevity_penalty': brevity penalty,\n 'length_ratio': ratio of lengths,\n 'translation_length': translation_length,\n 'reference_length': reference_length\nExamples:\n\n >>> predictions = [\n ... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample\n ... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)\n ... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric(\"bleu\")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results[\"bleu\"])\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Bleu(datasets.Metric):
    """Corpus-level BLEU, delegating the scoring to the reference `nmt` implementation.

    NOTE(review): the obfuscation named both methods `__a`, so `_info` was
    shadowed by `_compute` (breaking the `datasets.Metric` contract), and
    `_compute` declared four parameters with the same name (a SyntaxError).
    The method and parameter names required by `datasets.Metric` are restored.
    The decorator expects module constants `_DESCRIPTION` / `_KWARGS_DESCRIPTION`
    — confirm the module-level string constants above carry those names.
    """

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
            codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, predictions, references, max_order=4, smooth=False):
        # compute_bleu expects (references, translations); keep the argument
        # mapping explicit to avoid swapping them.
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
        return {
            "bleu": bleu,
            "precisions": precisions,
            "brevity_penalty": bp,
            "length_ratio": ratio,
            "translation_length": translation_length,
            "reference_length": reference_length,
        }
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
"shi-labs/dinat-mini-in1k-224": "https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json",
# See all Dinat models at https://huggingface.co/models?filter=dinat
}
class DinatConfig(BackboneConfigMixin, PretrainedConfig):
    """Configuration for the Dinat (Dilated Neighborhood Attention Transformer) model.

    NOTE(review): the obfuscated original declared every `__init__` parameter
    with the same name (a SyntaxError) and both class attributes with the same
    name; parameter names are restored from the attributes the body assigns.
    The base classes are reconstructed from the `get_aligned_output_features_output_indices`
    import — confirm against the file's imports.
    """

    model_type = "dinat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        dilations=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]],
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.dilations = dilations
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase = logging.get_logger(__name__)
lowercase = {
"""transfo-xl-wt103""": """https://huggingface.co/transfo-xl-wt103/resolve/main/config.json""",
}
class TransfoXLConfig(PretrainedConfig):
    """Configuration for the Transformer-XL model.

    NOTE(review): the obfuscated original declared every `__init__` parameter
    with the same name (a SyntaxError), gave all three class attributes the
    same name, and renamed the `max_position_embeddings` property getter while
    leaving the `@max_position_embeddings.setter` decorator intact (a NameError
    at class creation). Names are restored from the attributes the body assigns.
    """

    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=267_735,
        cutoffs=[20_000, 40_000, 200_000],
        d_model=1_024,
        d_embed=1_024,
        n_head=16,
        d_head=64,
        d_inner=4_096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1_600,
        clamp_len=1_000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        # Share all projection matrices except the first across cutoff clusters.
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        # Transformer-XL has no fixed sequence-length limit; -1 signals "unbounded".
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
| 715 |
"""simple docstring"""
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowercase = logging.get_logger(__name__)
lowercase = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
lowercase = {
"""vocab_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"""
},
"""merges_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"""
},
"""tokenizer_config_file""": {
"""facebook/blenderbot_small-90M""": (
"""https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"""
)
},
}
lowercase = {"""facebook/blenderbot_small-90M""": 512}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in `word`.

    `word` is any sequence of symbols (a string, or a tuple of merged
    sub-tokens during BPE). An empty or single-symbol word yields no pairs.

    NOTE(review): the original definition was obfuscated to `A__` while the
    tokenizer's `bpe` method calls `get_pairs`; the name is restored to match
    the call site. The crash on empty input (`word[0]` on "") is fixed, and a
    redundant `set(pairs)` re-wrap is removed.
    """
    if not word:
        return set()
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class BlenderbotSmallTokenizer(PreTrainedTokenizer):
    """BPE tokenizer for BlenderbotSmall (90M) based on a vocab.json / merges.txt pair.

    NOTE(review): the obfuscation collapsed all four class attributes and all
    eight methods to single shared names (only the last definition of each
    survived, breaking the `PreTrainedTokenizer` contract), and declared the
    `__init__` parameters with one duplicated name (a SyntaxError). The
    attribute/method/parameter names required by the tokenizer API are restored.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="__start__",
        eos_token="__end__",
        unk_token="__unk__",
        pad_token="__null__",
        **kwargs,
    ):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            # Drop the "#version" header line and the trailing empty line.
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token: str) -> str:
        """Apply byte-pair merges to `token`, returning "@@ "-joined sub-tokens."""
        if token in self.cache:
            return self.cache[token]
        # Pre-tokenize punctuation/apostrophes and normalise whitespace/newlines.
        token = re.sub("([.,!?()])", r" \1", token)
        token = re.sub("(')", r" \1 ", token)
        token = re.sub(r"\s{2,}", " ", token)
        if "\n" in token:
            token = token.replace("\n", " __newln__")

        tokens = token.split(" ")
        words = []
        for token in tokens:
            if not len(token):
                continue

            token = token.lower()
            word = tuple(token)
            # Mark the final symbol as end-of-word for the merge table.
            word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
            pairs = get_pairs(word)

            if not pairs:
                words.append(token)
                continue

            while True:
                # Greedily apply the lowest-ranked (earliest-learned) merge.
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0

                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break

                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1
                word = tuple(new_word)
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = "@@ ".join(word)
            # Strip the trailing "</w>" marker.
            word = word[:-4]
            self.cache[token] = word
            words.append(word)
        return " ".join(words)

    def _tokenize(self, text: str) -> List[str]:
        """Split `text` on whitespace-delimited chunks and BPE-encode each."""
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        # Undo the "@@ " continuation markers introduced by bpe().
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write vocab.json and merges.txt into `save_directory`; returns their paths."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            # The obfuscated lambda renamed its parameter but still referenced
            # `kv` (a NameError); fixed to sort merges by their learned rank.
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
| 150 | 0 |
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
SCREAMING_SNAKE_CASE : List[str] = "platform"
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
) -> dict:
    """Assemble the Flax Blenderbot input dict, deriving masks from pad tokens.

    Masks not supplied are computed as 1 for every non-pad token; head masks
    default to all-ones (kept for signature compatibility even though they are
    not part of the returned dict).

    NOTE(review): the obfuscated original declared all eight parameters with
    the same name (a SyntaxError), and the returned dict mapped
    "decoder_attention_mask" to the *encoder* attention mask while the computed
    decoder mask was discarded — both fixed; names restored from the call site
    (`prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)`).
    """
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
class FlaxBlenderbotModelTester:
    """Builds tiny Blenderbot configs/inputs and checks that cached (incremental)
    decoding matches a full forward pass.

    NOTE(review): the obfuscation gave every method the same name and every
    parameter the same name (method shadowing / SyntaxError); names are
    restored from the attribute assignments and call patterns.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=32,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        # Keep token ids >= 3 so pad (1) / eos (2) only occur where placed below;
        # every sequence then gets an explicit trailing EOS (2).
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)

        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)

        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            initializer_range=self.initializer_range,
            use_cache=False,  # reconstructed from the mangled kwarg — TODO confirm
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        # Attend over the full (padded) cache window.
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        # Prime the cache with all tokens but the last...
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        # ...then decode only the final token from the cache.
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        # Pad the real decoder mask out to the cache window with zeros.
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class _lowerCamelCase(unittest.TestCase):
    """Standalone tests for the Flax Blenderbot LM head and the
    ``shift_tokens_right`` helper.

    NOTE(review): restored from machine-mangled code in which (a) every local
    was assigned to ``_lowercase`` and then read under its original name
    (NameError), (b) all three test methods shared one name so only the last
    was collected by unittest, (c) the attribute read via ``self.vocab_size``
    was declared under a placeholder name, and (d) the dtypes ``np.intaa`` /
    ``np.floataa`` do not exist (restored to ``np.int64`` / ``np.float32``).
    Confirm against the upstream Flax Blenderbot test suite.
    """

    # Read by the methods below as ``self.vocab_size``.
    vocab_size = 99

    def _get_config_and_data(self):
        """Return (config, input_ids, batch_size): a tiny Blenderbot config and a
        batch of encoder input ids (eos_token_id=2, pad_token_id=1)."""
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )
        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=24,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=32,
            decoder_ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size

    def test_lm_forward(self):
        """The LM head must produce logits of shape (batch, seq_len, vocab_size)."""
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_lm_uneven_forward(self):
        """Encoder and decoder sequences of different lengths must be accepted."""
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=14,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=8,
            decoder_ffn_dim=8,
            max_position_embeddings=48,
        )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_shift_tokens_right(self):
        """shift_tokens_right must keep the shape, prepend the decoder start
        token (2), and consume exactly one pad token (1)."""
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
@require_flax
# NOTE(review): machine-mangled test class — appears to be the Flax Blenderbot
# model test suite. As written it cannot run: locals are assigned to the
# placeholder `_lowercase` but later statements read the original variable
# names (`model`, `prepared_inputs_dict`, `encoder_outputs`, `generated_ids`,
# ...), every test method shares the name `UpperCamelCase` (only the last is
# collected), and the base class `_a` is listed twice (duplicate-base
# TypeError at class creation). Left byte-identical pending reconstruction
# against the upstream source.
class _lowerCamelCase( _a, unittest.TestCase, _a ):
lowercase_ : Any = True
# Tuple of all model classes under test (empty when flax is unavailable).
lowercase_ : Optional[int] = (
(
FlaxBlenderbotModel,
FlaxBlenderbotForConditionalGeneration,
)
if is_flax_available()
else ()
)
# Generative model classes only.
lowercase_ : Dict = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
def UpperCamelCase ( self) -> List[Any]:
"""Create the shared model-tester fixture (presumably setUp — name mangled)."""
_lowercase : Optional[Any] = FlaxBlenderbotModelTester(self)
def UpperCamelCase ( self) -> Any:
"""Run the tester's cache-forward check for every model class."""
_lowercase , _lowercase : int = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(lowerCamelCase, lowerCamelCase, lowerCamelCase)
def UpperCamelCase ( self) -> Optional[Any]:
"""Run the cache-forward-with-attention-mask check for every model class."""
_lowercase , _lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(lowerCamelCase, lowerCamelCase, lowerCamelCase)
def UpperCamelCase ( self) -> Optional[Any]:
"""Check that model.encode produces identical shapes with and without jax.jit."""
_lowercase , _lowercase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
_lowercase : Optional[Any] = self._prepare_for_class(lowerCamelCase, lowerCamelCase)
_lowercase : Any = model_class(lowerCamelCase)
@jax.jit
def encode_jitted(lowerCamelCase, lowerCamelCase=None, **lowerCamelCase):
return model.encode(input_ids=lowerCamelCase, attention_mask=lowerCamelCase)
with self.subTest('JIT Enabled'):
_lowercase : List[str] = encode_jitted(**lowerCamelCase).to_tuple()
with self.subTest('JIT Disabled'):
with jax.disable_jit():
_lowercase : Union[str, Any] = encode_jitted(**lowerCamelCase).to_tuple()
# Jitted and non-jitted outputs must agree element-for-element in shape.
self.assertEqual(len(lowerCamelCase), len(lowerCamelCase))
for jitted_output, output in zip(lowerCamelCase, lowerCamelCase):
self.assertEqual(jitted_output.shape, output.shape)
def UpperCamelCase ( self) -> Union[str, Any]:
"""Check that model.decode produces identical shapes with and without jax.jit."""
_lowercase , _lowercase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
_lowercase : int = model_class(lowerCamelCase)
_lowercase : Union[str, Any] = model.encode(inputs_dict['input_ids'], inputs_dict['attention_mask'])
# Decoder-side inputs reuse the precomputed encoder outputs.
_lowercase : Optional[int] = {
'decoder_input_ids': inputs_dict['decoder_input_ids'],
'decoder_attention_mask': inputs_dict['decoder_attention_mask'],
'encoder_outputs': encoder_outputs,
}
@jax.jit
def decode_jitted(lowerCamelCase, lowerCamelCase, lowerCamelCase):
return model.decode(
decoder_input_ids=lowerCamelCase, decoder_attention_mask=lowerCamelCase, encoder_outputs=lowerCamelCase, )
with self.subTest('JIT Enabled'):
_lowercase : Union[str, Any] = decode_jitted(**lowerCamelCase).to_tuple()
with self.subTest('JIT Disabled'):
with jax.disable_jit():
_lowercase : Optional[int] = decode_jitted(**lowerCamelCase).to_tuple()
self.assertEqual(len(lowerCamelCase), len(lowerCamelCase))
for jitted_output, output in zip(lowerCamelCase, lowerCamelCase):
self.assertEqual(jitted_output.shape, output.shape)
@slow
def UpperCamelCase ( self) -> Tuple:
"""Smoke-test from_pretrained on the distilled 400M checkpoint (network; @slow)."""
for model_class_name in self.all_model_classes:
_lowercase : Optional[Any] = model_class_name.from_pretrained('facebook/blenderbot-400M-distill')
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
_lowercase : Any = np.ones((1, 1)) * model.config.eos_token_id
_lowercase : Optional[Any] = model(lowerCamelCase)
self.assertIsNotNone(lowerCamelCase)
@unittest.skipUnless(jax_device != 'cpu', '3B test too slow on CPU.')
@slow
def UpperCamelCase ( self) -> Tuple:
"""End-to-end generation with the 3B checkpoint against a pinned target string."""
_lowercase : Union[str, Any] = {'num_beams': 1, 'early_stopping': True, 'min_length': 15, 'max_length': 25}
_lowercase : Any = {'skip_special_tokens': True, 'clean_up_tokenization_spaces': True}
_lowercase : Any = FlaxBlenderbotForConditionalGeneration.from_pretrained('facebook/blenderbot-3B', from_pt=lowerCamelCase)
_lowercase : Dict = BlenderbotTokenizer.from_pretrained('facebook/blenderbot-3B')
_lowercase : int = ['Sam']
_lowercase : str = tokenizer(lowerCamelCase, return_tensors='jax')
_lowercase : Optional[Any] = model.generate(**lowerCamelCase, **lowerCamelCase)
_lowercase : Any = 'Sam is a great name. It means "sun" in Gaelic.'
_lowercase : Tuple = tokenizer.batch_decode(lowerCamelCase, **lowerCamelCase)
assert generated_txt[0].strip() == tgt_text
| 89 | '''simple docstring'''
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
logger = logging.get_logger(__name__)
# Backward-compatible alias for the previous (mangled) module-level name.
__snake_case = logger


@dataclass
class _a(BenchmarkArguments):
    """PyTorch-specific benchmark arguments (torchscript tracing, TPU metric
    printing, Apex fp16 opt level) on top of the shared BenchmarkArguments.

    NOTE(review): restored from machine-mangled code in which ``__init__`` read
    the undefined names ``kwargs`` and ``positive_arg``, popped the wrong key,
    all three dataclass fields and all six properties shared a single
    placeholder name (shadowing each other), and the base class ``__a`` was
    undefined while the imported ``BenchmarkArguments`` went unused. Field and
    attribute names were recovered from the string keys popped in ``__init__``;
    the ``default=False`` on the boolean fields is inferred — confirm against
    the upstream source.
    """

    # Legacy negated flags accepted for backward compatibility.
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        """Translate deprecated ``no_*`` flags into their positive counterparts
        and pop the PyTorch-only arguments before delegating to the base class.
        """
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]  # strip the "no_" prefix
                # The deprecated flag is the negation of the positive attribute.
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )
        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
        super().__init__(**kwargs)

    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fp16_opt_level: str = field(
        default="O1",
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        },
    )

    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        """Resolve the torch device and GPU count once; cached afterwards."""
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu(self):
        """True when a TPU is available and TPU use is enabled."""
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self, ["torch"])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self) -> "torch.device":
        """The resolved torch device."""
        requires_backends(self, ["torch"])
        return self._setup_devices[0]

    @property
    def n_gpu(self):
        """Number of GPUs available for the benchmark."""
        requires_backends(self, ["torch"])
        return self._setup_devices[1]

    @property
    def is_gpu(self):
        return self.n_gpu > 0
| 451 | 0 |
import warnings

# Deprecation shim: the class name below is taken from the warning text —
# the mangled original imported the nonexistent `StableDiffusionImgaImgPipeline`.
from diffusers import StableDiffusionImg2ImgPipeline  # noqa F401


# Emitted once on import so callers of the old script know where to migrate.
warnings.warn(
    "The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
    " StableDiffusionImg2ImgPipeline` instead."
)
| 73 |
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class lowerCAmelCase__(ProcessorMixin):
    r"""Wraps an `EncodecFeatureExtractor` and a T5 tokenizer into a single
    processor exposing the standard `__call__` / `batch_decode` / `decode` API.

    NOTE(review): restored from machine-mangled code in which all four methods
    shared one placeholder name (shadowing each other), locals were assigned to
    a placeholder but read under their original names (NameError), the base
    class was the undefined ``__lowerCamelCase`` while the imported
    ``ProcessorMixin`` went unused, and ``kwargs.pop`` used a broken default
    where ``None`` is clearly intended. Class-attribute names follow the
    ProcessorMixin convention — confirm against the upstream source.
    """

    feature_extractor_class = "EncodecFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        # Default target processor; a target-context manager may swap it.
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        """Forward to the tokenizer's ``get_decoder_prompt_ids``."""
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        """Prepare model inputs from ``audio`` and/or ``text``.

        Returns the tokenizer output, the feature-extractor output, or (when
        both are given) the tokenizer output augmented with ``input_values``
        and, if present, ``padding_mask`` from the feature extractor.
        """
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            # First positional argument is treated as the audio input.
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if text is not None:
            inputs = self.tokenizer(text, **kwargs)
        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)

        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs["input_values"] = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["padding_mask"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        """Decode a batch: audio values via ``_decode_audio``; otherwise token
        ids via the tokenizer."""
        audio_values = kwargs.pop("audio", None)
        padding_mask = kwargs.pop("padding_mask", None)
        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]
        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    def _decode_audio(self, audio_values, padding_mask=None):
        """Strip padded positions from decoded audio of shape (batch, channels,
        seq_len), returning a list of per-example (channels, -1) arrays."""
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape

        if padding_mask is None:
            return list(audio_values)

        padding_mask = to_numpy(padding_mask)

        # match the sequence length of the padding mask to the generated audio arrays by padding with the
        # **non-padding** token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)

        audio_values = audio_values.tolist()
        for i in range(bsz):
            # Keep only the positions the mask marks as real (non-padding) audio.
            sliced_audio = np.asarray(audio_values[i])[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels, -1)

        return audio_values
| 73 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.