code stringlengths 87 55.2k | code_codestyle int64 0 349 | style_context stringlengths 135 49.1k | style_context_codestyle int64 0 349 | label int64 0 1 |
|---|---|---|---|---|
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class snake_case_ ( __A ):
__A : UNetaDModel
__A : KarrasVeScheduler
def __init__( self : Tuple , lowercase_ : UNetaDModel , lowercase_ : KarrasVeScheduler ) -> str:
super().__init__()
self.register_modules(unet=lowercase_ , scheduler=lowercase_ )
@torch.no_grad()
def __call__( self : int , lowercase_ : int = 1 , lowercase_ : int = 50 , lowercase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowercase_ : Optional[str] = "pil" , lowercase_ : bool = True , **lowercase_ : Dict , ) -> Union[Tuple, ImagePipelineOutput]:
lowercase__ : Optional[Any] = self.unet.config.sample_size
lowercase__ : str = (batch_size, 3, img_size, img_size)
lowercase__ : Union[str, Any] = self.unet
# sample x_0 ~ N(0, sigma_0^2 * I)
lowercase__ : Any = randn_tensor(lowercase_ , generator=lowercase_ , device=self.device ) * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(lowercase_ )
for t in self.progress_bar(self.scheduler.timesteps ):
# here sigma_t == t_i from the paper
lowercase__ : str = self.scheduler.schedule[t]
lowercase__ : Optional[int] = self.scheduler.schedule[t - 1] if t > 0 else 0
# 1. Select temporarily increased noise level sigma_hat
# 2. Add new noise to move from sample_i to sample_hat
lowercase__ , lowercase__ : Optional[int] = self.scheduler.add_noise_to_input(lowercase_ , lowercase_ , generator=lowercase_ )
# 3. Predict the noise residual given the noise magnitude `sigma_hat`
# The model inputs and output are adjusted by following eq. (213) in [1].
lowercase__ : Dict = (sigma_hat / 2) * model((sample_hat + 1) / 2 , sigma_hat / 2 ).sample
# 4. Evaluate dx/dt at sigma_hat
# 5. Take Euler step from sigma to sigma_prev
lowercase__ : Optional[Any] = self.scheduler.step(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
if sigma_prev != 0:
# 6. Apply 2nd order correction
# The model inputs and output are adjusted by following eq. (213) in [1].
lowercase__ : Any = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 , sigma_prev / 2 ).sample
lowercase__ : Any = self.scheduler.step_correct(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , step_output.prev_sample , step_output["derivative"] , )
lowercase__ : Dict = step_output.prev_sample
lowercase__ : str = (sample / 2 + 0.5).clamp(0 , 1 )
lowercase__ : List[Any] = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowercase__ : Optional[int] = self.numpy_to_pil(lowercase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowercase_ )
| 87 | from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
UpperCamelCase = TypeVar('''T''')
class snake_case_ ( Generic[T] ):
__A : deque[T] # Cache store of keys
__A : set[T] # References of the keys in cache
__A : int = 10 # Maximum capacity of cache
def __init__( self : Union[str, Any] , lowercase_ : int ) -> None:
lowercase__ : int = deque()
lowercase__ : str = set()
if not n:
lowercase__ : str = sys.maxsize
elif n < 0:
raise ValueError("n should be an integer greater than 0." )
else:
lowercase__ : List[Any] = n
def __UpperCamelCase ( self : Dict , lowercase_ : T ) -> None:
if x not in self.key_reference:
if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
lowercase__ : Dict = self.dq_store.pop()
self.key_reference.remove(lowercase_ )
else:
self.dq_store.remove(lowercase_ )
self.dq_store.appendleft(lowercase_ )
self.key_reference.add(lowercase_ )
def __UpperCamelCase ( self : Dict ) -> None:
for k in self.dq_store:
print(lowercase_ )
def __repr__( self : Optional[int] ) -> str:
return F'''LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}'''
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase = LRUCache(4)
lru_cache.refer('''A''')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('''A''')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 87 | 1 |
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
'''The `image_to_image.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionImg2ImgPipeline` instead.'''
)
| 87 | from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json''',
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class snake_case_ ( __A ):
__A : List[str] = "convbert"
def __init__( self : Union[str, Any] , lowercase_ : str=3_05_22 , lowercase_ : Any=7_68 , lowercase_ : Tuple=12 , lowercase_ : List[str]=12 , lowercase_ : Optional[int]=30_72 , lowercase_ : Union[str, Any]="gelu" , lowercase_ : str=0.1 , lowercase_ : List[str]=0.1 , lowercase_ : Optional[Any]=5_12 , lowercase_ : Dict=2 , lowercase_ : Union[str, Any]=0.02 , lowercase_ : Optional[Any]=1E-12 , lowercase_ : Optional[int]=1 , lowercase_ : List[Any]=0 , lowercase_ : Optional[int]=2 , lowercase_ : str=7_68 , lowercase_ : Dict=2 , lowercase_ : Optional[Any]=9 , lowercase_ : Union[str, Any]=1 , lowercase_ : Any=None , **lowercase_ : Optional[Any] , ) -> Dict:
super().__init__(
pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_ , )
lowercase__ : List[str] = vocab_size
lowercase__ : Union[str, Any] = hidden_size
lowercase__ : Any = num_hidden_layers
lowercase__ : List[str] = num_attention_heads
lowercase__ : Union[str, Any] = intermediate_size
lowercase__ : Optional[Any] = hidden_act
lowercase__ : int = hidden_dropout_prob
lowercase__ : str = attention_probs_dropout_prob
lowercase__ : Union[str, Any] = max_position_embeddings
lowercase__ : Optional[int] = type_vocab_size
lowercase__ : Tuple = initializer_range
lowercase__ : List[str] = layer_norm_eps
lowercase__ : List[Any] = embedding_size
lowercase__ : Optional[Any] = head_ratio
lowercase__ : Dict = conv_kernel_size
lowercase__ : Tuple = num_groups
lowercase__ : Optional[int] = classifier_dropout
class snake_case_ ( __A ):
@property
def __UpperCamelCase ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
lowercase__ : Tuple = {0: "batch", 1: "choice", 2: "sequence"}
else:
lowercase__ : str = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
] )
| 87 | 1 |
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def lowercase_ ( _lowerCamelCase : str):
lowercase__ : Tuple = {}
lowercase__ : Any = tokenizer(example["content"] , truncation=_lowerCamelCase)["input_ids"]
lowercase__ : int = len(example["content"]) / len(output["input_ids"])
return output
UpperCamelCase = HfArgumentParser(PretokenizationArguments)
UpperCamelCase = parser.parse_args()
if args.num_workers is None:
UpperCamelCase = multiprocessing.cpu_count()
UpperCamelCase = AutoTokenizer.from_pretrained(args.tokenizer_dir)
UpperCamelCase = time.time()
UpperCamelCase = load_dataset(args.dataset_name, split='''train''')
print(f"Dataset loaded in {time.time()-t_start:.2f}s")
UpperCamelCase = time.time()
UpperCamelCase = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'''repo_name''',
'''path''',
'''copies''',
'''size''',
'''content''',
'''license''',
'''hash''',
'''line_mean''',
'''line_max''',
'''alpha_frac''',
'''autogenerated''',
],
)
print(f"Dataset tokenized in {time.time()-t_start:.2f}s")
UpperCamelCase = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f"Data pushed to the hub in {time.time()-t_start:.2f}s")
| 87 | import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def lowercase_ ( _lowerCamelCase : str , _lowerCamelCase : List[Any] , _lowerCamelCase : Dict):
# Initialise PyTorch model
lowercase__ : List[str] = BertConfig.from_json_file(_lowerCamelCase)
print(f'''Building PyTorch model from configuration: {config}''')
lowercase__ : Optional[Any] = BertForPreTraining(_lowerCamelCase)
# Load weights from tf checkpoint
load_tf_weights_in_bert(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''')
torch.save(model.state_dict() , _lowerCamelCase)
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
UpperCamelCase = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 87 | 1 |
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def lowercase_ ( _lowerCamelCase : str = "laptop"):
lowercase__ : Optional[Any] = f'''https://www.amazon.in/laptop/s?k={product}'''
lowercase__ : Dict = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
"Accept-Language": "en-US, en;q=0.5",
}
lowercase__ : List[str] = BeautifulSoup(requests.get(_lowerCamelCase , headers=_lowerCamelCase).text)
# Initialize a Pandas dataframe with the column titles
lowercase__ : Dict = DataFrame(
columns=[
"Product Title",
"Product Link",
"Current Price of the product",
"Product Rating",
"MRP of the product",
"Discount",
])
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
"div" , attrs={"class": "s-result-item", "data-component-type": "s-search-result"} , ) , soup.find_all("div" , attrs={"class": "a-row a-size-base a-color-base"}) , ):
try:
lowercase__ : List[str] = item.ha.text
lowercase__ : List[str] = "https://www.amazon.in/" + item.ha.a["href"]
lowercase__ : Optional[Any] = item.find("span" , attrs={"class": "a-offscreen"}).text
try:
lowercase__ : Tuple = item.find("span" , attrs={"class": "a-icon-alt"}).text
except AttributeError:
lowercase__ : List[Any] = "Not available"
try:
lowercase__ : int = (
"₹"
+ item.find(
"span" , attrs={"class": "a-price a-text-price"}).text.split("₹")[1]
)
except AttributeError:
lowercase__ : Any = ""
try:
lowercase__ : Optional[Any] = float(
(
(
float(product_mrp.strip("₹").replace("," , ""))
- float(product_price.strip("₹").replace("," , ""))
)
/ float(product_mrp.strip("₹").replace("," , ""))
)
* 100)
except ValueError:
lowercase__ : Dict = float("nan")
except AttributeError:
pass
lowercase__ : Tuple = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
lowercase__ : str = " "
lowercase__ : Dict = " "
data_frame.index += 1
return data_frame
if __name__ == "__main__":
UpperCamelCase = '''headphones'''
get_amazon_product_data(product).to_csv(f"Amazon Product Data for {product}.csv")
| 87 | import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def lowercase_ ( _lowerCamelCase : Any , _lowerCamelCase : List[str]=False):
try:
lowercase__ : Union[str, Any] = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
lowercase__ : int = default
else:
# KEY is set, convert it to True or False.
try:
lowercase__ : Optional[int] = strtobool(_lowerCamelCase)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f'''If set, {key} must be yes or no.''')
return _value
UpperCamelCase = parse_flag_from_env('''RUN_SLOW''', default=False)
UpperCamelCase = parse_flag_from_env('''RUN_REMOTE''', default=False)
UpperCamelCase = parse_flag_from_env('''RUN_LOCAL''', default=True)
UpperCamelCase = parse_flag_from_env('''RUN_PACKAGED''', default=True)
# Compression
UpperCamelCase = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='''test requires lz4''')
UpperCamelCase = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='''test requires py7zr''')
UpperCamelCase = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='''test requires zstandard''')
# Audio
UpperCamelCase = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('''soundfile''') is None or version.parse(importlib_metadata.version('''soundfile''')) < version.parse('''0.12.0'''),
reason='''test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ''',
)
# Beam
UpperCamelCase = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('''0.3.2'''),
reason='''test requires apache-beam and a compatible dill version''',
)
# Dill-cloudpickle compatibility
UpperCamelCase = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('''0.3.2'''),
reason='''test requires dill>0.3.2 for cloudpickle compatibility''',
)
# Windows
UpperCamelCase = pytest.mark.skipif(
sys.platform == '''win32''',
reason='''test should not be run on Windows''',
)
def lowercase_ ( _lowerCamelCase : int):
try:
import faiss # noqa
except ImportError:
lowercase__ : Optional[Any] = unittest.skip("test requires faiss")(_lowerCamelCase)
return test_case
def lowercase_ ( _lowerCamelCase : int):
try:
import regex # noqa
except ImportError:
lowercase__ : List[Any] = unittest.skip("test requires regex")(_lowerCamelCase)
return test_case
def lowercase_ ( _lowerCamelCase : int):
try:
import elasticsearch # noqa
except ImportError:
lowercase__ : Optional[int] = unittest.skip("test requires elasticsearch")(_lowerCamelCase)
return test_case
def lowercase_ ( _lowerCamelCase : Union[str, Any]):
try:
import sqlalchemy # noqa
except ImportError:
lowercase__ : Optional[int] = unittest.skip("test requires sqlalchemy")(_lowerCamelCase)
return test_case
def lowercase_ ( _lowerCamelCase : int):
if not config.TORCH_AVAILABLE:
lowercase__ : Tuple = unittest.skip("test requires PyTorch")(_lowerCamelCase)
return test_case
def lowercase_ ( _lowerCamelCase : Tuple):
if not config.TF_AVAILABLE:
lowercase__ : Any = unittest.skip("test requires TensorFlow")(_lowerCamelCase)
return test_case
def lowercase_ ( _lowerCamelCase : Dict):
if not config.JAX_AVAILABLE:
lowercase__ : List[str] = unittest.skip("test requires JAX")(_lowerCamelCase)
return test_case
def lowercase_ ( _lowerCamelCase : int):
if not config.PIL_AVAILABLE:
lowercase__ : Dict = unittest.skip("test requires Pillow")(_lowerCamelCase)
return test_case
def lowercase_ ( _lowerCamelCase : Tuple):
try:
import transformers # noqa F401
except ImportError:
return unittest.skip("test requires transformers")(_lowerCamelCase)
else:
return test_case
def lowercase_ ( _lowerCamelCase : Optional[Any]):
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip("test requires tiktoken")(_lowerCamelCase)
else:
return test_case
def lowercase_ ( _lowerCamelCase : Dict):
try:
import spacy # noqa F401
except ImportError:
return unittest.skip("test requires spacy")(_lowerCamelCase)
else:
return test_case
def lowercase_ ( _lowerCamelCase : Optional[int]):
def _require_spacy_model(_lowerCamelCase : Optional[int]):
try:
import spacy # noqa F401
spacy.load(_lowerCamelCase)
except ImportError:
return unittest.skip("test requires spacy")(_lowerCamelCase)
except OSError:
return unittest.skip("test requires spacy model '{}'".format(_lowerCamelCase))(_lowerCamelCase)
else:
return test_case
return _require_spacy_model
def lowercase_ ( _lowerCamelCase : Dict):
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip("test requires pyspark")(_lowerCamelCase)
else:
return test_case
def lowercase_ ( _lowerCamelCase : List[str]):
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip("test requires joblibspark")(_lowerCamelCase)
else:
return test_case
def lowercase_ ( _lowerCamelCase : Dict):
if not _run_slow_tests or _run_slow_tests == 0:
lowercase__ : Tuple = unittest.skip("test is slow")(_lowerCamelCase)
return test_case
def lowercase_ ( _lowerCamelCase : int):
if not _run_local_tests or _run_local_tests == 0:
lowercase__ : str = unittest.skip("test is local")(_lowerCamelCase)
return test_case
def lowercase_ ( _lowerCamelCase : Optional[int]):
if not _run_packaged_tests or _run_packaged_tests == 0:
lowercase__ : List[Any] = unittest.skip("test is packaged")(_lowerCamelCase)
return test_case
def lowercase_ ( _lowerCamelCase : Tuple):
if not _run_remote_tests or _run_remote_tests == 0:
lowercase__ : Union[str, Any] = unittest.skip("test requires remote")(_lowerCamelCase)
return test_case
def lowercase_ ( *_lowerCamelCase : str):
def decorate(cls : str):
for name, fn in cls.__dict__.items():
if callable(_lowerCamelCase) and name.startswith("test"):
for decorator in decorators:
lowercase__ : Optional[int] = decorator(_lowerCamelCase)
setattr(cls , _lowerCamelCase , _lowerCamelCase)
return cls
return decorate
class snake_case_ ( __A ):
pass
class snake_case_ ( __A ):
__A : List[Any] = 0
__A : str = 1
__A : int = 2
@contextmanager
def lowercase_ ( _lowerCamelCase : List[str]=OfflineSimulationMode.CONNECTION_FAILS , _lowerCamelCase : int=1E-16):
lowercase__ : int = requests.Session().request
def timeout_request(_lowerCamelCase : str , _lowerCamelCase : Dict , _lowerCamelCase : Dict , **_lowerCamelCase : str):
# Change the url to an invalid url so that the connection hangs
lowercase__ : Any = "https://10.255.255.1"
if kwargs.get("timeout") is None:
raise RequestWouldHangIndefinitelyError(
f'''Tried a call to {url} in offline mode with no timeout set. Please set a timeout.''')
lowercase__ : Dict = timeout
try:
return online_request(_lowerCamelCase , _lowerCamelCase , **_lowerCamelCase)
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
lowercase__ : Dict = url
lowercase__ : Union[str, Any] = e.args[0]
lowercase__ : Optional[Any] = (max_retry_error.args[0].replace("10.255.255.1" , f'''OfflineMock[{url}]'''),)
lowercase__ : int = (max_retry_error,)
raise
def raise_connection_error(_lowerCamelCase : Union[str, Any] , _lowerCamelCase : Optional[Any] , **_lowerCamelCase : Tuple):
raise requests.ConnectionError("Offline mode is enabled." , request=_lowerCamelCase)
if mode is OfflineSimulationMode.CONNECTION_FAILS:
with patch("requests.Session.send" , _lowerCamelCase):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
# inspired from https://stackoverflow.com/a/904609
with patch("requests.Session.request" , _lowerCamelCase):
yield
elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
with patch("datasets.config.HF_DATASETS_OFFLINE" , _lowerCamelCase):
yield
else:
raise ValueError("Please use a value from the OfflineSimulationMode enum.")
@contextmanager
def lowercase_ ( *_lowerCamelCase : str , **_lowerCamelCase : Tuple):
lowercase__ : Dict = str(Path().resolve())
with tempfile.TemporaryDirectory(*_lowerCamelCase , **_lowerCamelCase) as tmp_dir:
try:
os.chdir(_lowerCamelCase)
yield
finally:
os.chdir(_lowerCamelCase)
@contextmanager
def lowercase_ ( ):
import gc
gc.collect()
lowercase__ : Union[str, Any] = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def lowercase_ ( ):
import gc
gc.collect()
lowercase__ : int = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def lowercase_ ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[Any]):
return deepcopy(_lowerCamelCase).integers(0 , 100 , 10).tolist() == deepcopy(_lowerCamelCase).integers(0 , 100 , 10).tolist()
def lowercase_ ( _lowerCamelCase : str):
import decorator
from requests.exceptions import HTTPError
def _wrapper(_lowerCamelCase : str , *_lowerCamelCase : Dict , **_lowerCamelCase : Dict):
try:
return func(*_lowerCamelCase , **_lowerCamelCase)
except HTTPError as err:
if str(_lowerCamelCase).startswith("500") or str(_lowerCamelCase).startswith("502"):
pytest.xfail(str(_lowerCamelCase))
raise err
return decorator.decorator(_wrapper , _lowerCamelCase)
class snake_case_ :
def __init__( self : int , lowercase_ : Union[str, Any] , lowercase_ : Dict , lowercase_ : List[str] ) -> List[str]:
lowercase__ : Tuple = returncode
lowercase__ : int = stdout
lowercase__ : Union[str, Any] = stderr
async def lowercase_ ( _lowerCamelCase : List[str] , _lowerCamelCase : Dict):
while True:
lowercase__ : Optional[int] = await stream.readline()
if line:
callback(_lowerCamelCase)
else:
break
async def lowercase_ ( _lowerCamelCase : Optional[int] , _lowerCamelCase : Union[str, Any]=None , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : int=None , _lowerCamelCase : Optional[Any]=False , _lowerCamelCase : Tuple=False):
if echo:
print("\nRunning: " , " ".join(_lowerCamelCase))
lowercase__ : Optional[int] = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=_lowerCamelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=_lowerCamelCase , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
lowercase__ : str = []
lowercase__ : List[str] = []
def tee(_lowerCamelCase : str , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Optional[int]=""):
lowercase__ : Optional[int] = line.decode("utf-8").rstrip()
sink.append(_lowerCamelCase)
if not quiet:
print(_lowerCamelCase , _lowerCamelCase , file=_lowerCamelCase)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout , lambda _lowerCamelCase: tee(_lowerCamelCase , _lowerCamelCase , sys.stdout , label="stdout:")),
_read_stream(p.stderr , lambda _lowerCamelCase: tee(_lowerCamelCase , _lowerCamelCase , sys.stderr , label="stderr:")),
] , timeout=_lowerCamelCase , )
return _RunOutput(await p.wait() , _lowerCamelCase , _lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Optional[int] , _lowerCamelCase : List[str]=None , _lowerCamelCase : Dict=None , _lowerCamelCase : int=180 , _lowerCamelCase : Union[str, Any]=False , _lowerCamelCase : Optional[Any]=True):
lowercase__ : Any = asyncio.get_event_loop()
lowercase__ : Tuple = loop.run_until_complete(
_stream_subprocess(_lowerCamelCase , env=_lowerCamelCase , stdin=_lowerCamelCase , timeout=_lowerCamelCase , quiet=_lowerCamelCase , echo=_lowerCamelCase))
lowercase__ : int = " ".join(_lowerCamelCase)
if result.returncode > 0:
lowercase__ : Any = "\n".join(result.stderr)
raise RuntimeError(
f'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n'''
f'''The combined stderr from workers follows:\n{stderr}''')
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f'''\'{cmd_str}\' produced no output.''')
return result
def lowercase_ ( ):
lowercase__ : List[str] = os.environ.get("PYTEST_XDIST_WORKER" , "gw0")
lowercase__ : str = re.sub(R"^gw" , "" , _lowerCamelCase , 0 , re.M)
return int(_lowerCamelCase)
def lowercase_ ( ):
lowercase__ : Union[str, Any] = 2_9500
lowercase__ : Optional[int] = pytest_xdist_worker_id()
return port + uniq_delta
| 87 | 1 |
import os
import jsonlines
import numpy as np
from tqdm import tqdm
UpperCamelCase = 2048
UpperCamelCase = 4096
UpperCamelCase = 42
UpperCamelCase = os.environ.pop('''PROCESS_TRAIN''', '''false''')
UpperCamelCase = {'''null''': 0, '''short''': 1, '''long''': 2, '''yes''': 3, '''no''': 4}
def lowercase_ ( _lowerCamelCase : Tuple):
def choose_first(_lowerCamelCase : List[Any] , _lowerCamelCase : str=False):
assert isinstance(_lowerCamelCase , _lowerCamelCase)
if len(_lowerCamelCase) == 1:
lowercase__ : Union[str, Any] = answer[0]
return {k: [answer[k]] for k in answer} if is_long_answer else answer
for a in answer:
if is_long_answer:
lowercase__ : Optional[Any] = {k: [a[k]] for k in a}
if len(a["start_token"]) > 0:
break
return a
lowercase__ : Optional[Any] = {"id": example["id"]}
lowercase__ : Tuple = example["annotations"]
lowercase__ : Optional[Any] = annotation["yes_no_answer"]
if 0 in yes_no_answer or 1 in yes_no_answer:
lowercase__ : Union[str, Any] = ["yes"] if 1 in yes_no_answer else ["no"]
lowercase__ : Union[str, Any] = []
lowercase__ : Dict = []
lowercase__ : List[str] = ["<cls>"]
else:
lowercase__ : Any = ["short"]
lowercase__ : Union[str, Any] = choose_first(annotation["short_answers"])
if len(out["start_token"]) == 0:
# answer will be long if short is not available
lowercase__ : int = ["long"]
lowercase__ : Tuple = choose_first(annotation["long_answer"] , is_long_answer=_lowerCamelCase)
lowercase__ : str = []
answer.update(_lowerCamelCase)
# disregard some samples
if len(answer["start_token"]) > 1 or answer["start_token"] == answer["end_token"]:
lowercase__ : List[Any] = True
else:
lowercase__ : Dict = False
lowercase__ : List[str] = ["start_token", "end_token", "start_byte", "end_byte", "text"]
if not all(isinstance(answer[k] , _lowerCamelCase) for k in cols):
raise ValueError("Issue in ID" , example["id"])
return answer
def lowercase_ ( _lowerCamelCase : str , _lowerCamelCase : Tuple=False):
lowercase__ : Optional[int] = _get_single_answer(_lowerCamelCase)
# bytes are of no use
del answer["start_byte"]
del answer["end_byte"]
# handle yes_no answers explicitly
if answer["category"][0] in ["yes", "no"]: # category is list with one element
lowercase__ : List[Any] = example["document"]["tokens"]
lowercase__ : int = []
for i in range(len(doc["token"])):
if not doc["is_html"][i]:
context.append(doc["token"][i])
return {
"context": " ".join(_lowerCamelCase),
"answer": {
"start_token": -100, # ignore index in cross-entropy
"end_token": -100, # ignore index in cross-entropy
"category": answer["category"],
"span": answer["category"], # extra
},
}
# later, help in removing all no answers
if answer["start_token"] == [-1]:
return {
"context": "None",
"answer": {
"start_token": -1,
"end_token": -1,
"category": "null",
"span": "None", # extra
},
}
# handling normal samples
lowercase__ : str = ["start_token", "end_token"]
answer.update({k: answer[k][0] if len(answer[k]) > 0 else answer[k] for k in cols}) # e.g. [10] == 10
lowercase__ : List[str] = example["document"]["tokens"]
lowercase__ : Tuple = answer["start_token"]
lowercase__ : List[str] = answer["end_token"]
lowercase__ : int = []
for i in range(len(doc["token"])):
if not doc["is_html"][i]:
context.append(doc["token"][i])
else:
if answer["start_token"] > i:
start_token -= 1
if answer["end_token"] > i:
end_token -= 1
lowercase__ : Tuple = " ".join(context[start_token:end_token])
# checking above code
if assertion:
lowercase__ : Optional[Any] = doc["is_html"][answer["start_token"] : answer["end_token"]]
lowercase__ : List[Any] = doc["token"][answer["start_token"] : answer["end_token"]]
lowercase__ : Dict = " ".join([old[i] for i in range(len(_lowerCamelCase)) if not is_html[i]])
if new != old:
print("ID:" , example["id"])
print("New:" , _lowerCamelCase , end="\n")
print("Old:" , _lowerCamelCase , end="\n\n")
return {
"context": " ".join(_lowerCamelCase),
"answer": {
"start_token": start_token,
"end_token": end_token - 1, # this makes it inclusive
"category": answer["category"], # either long or short
"span": new, # extra
},
}
def lowercase_ ( _lowerCamelCase : Optional[int] , _lowerCamelCase : int , _lowerCamelCase : List[Any]=2048 , _lowerCamelCase : Any=4096 , _lowerCamelCase : Tuple=True):
# overlap will be of doc_stride - q_len
lowercase__ : Dict = get_context_and_ans(_lowerCamelCase , assertion=_lowerCamelCase)
lowercase__ : Dict = out["answer"]
# later, removing these samples
if answer["start_token"] == -1:
return {
"example_id": example["id"],
"input_ids": [[-1]],
"labels": {
"start_token": [-1],
"end_token": [-1],
"category": ["null"],
},
}
lowercase__ : Union[str, Any] = tokenizer(example["question"]["text"] , out["context"]).input_ids
lowercase__ : Optional[Any] = input_ids.index(tokenizer.sep_token_id) + 1
# return yes/no
if answer["category"][0] in ["yes", "no"]: # category is list with one element
lowercase__ : int = []
lowercase__ : Dict = []
lowercase__ : Tuple = input_ids[:q_len]
lowercase__ : Dict = range(_lowerCamelCase , len(_lowerCamelCase) , max_length - doc_stride)
for i in doc_start_indices:
lowercase__ : Tuple = i + max_length - q_len
lowercase__ : Any = input_ids[i:end_index]
inputs.append(q_indices + slice)
category.append(answer["category"][0])
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": [-100] * len(_lowerCamelCase),
"end_token": [-100] * len(_lowerCamelCase),
"category": category,
},
}
lowercase__ : str = out["context"].split()
lowercase__ : str = splitted_context[answer["end_token"]]
lowercase__ : Optional[Any] = len(
tokenizer(
" ".join(splitted_context[: answer["start_token"]]) , add_special_tokens=_lowerCamelCase , ).input_ids)
lowercase__ : Optional[int] = len(
tokenizer(" ".join(splitted_context[: answer["end_token"]]) , add_special_tokens=_lowerCamelCase).input_ids)
answer["start_token"] += q_len
answer["end_token"] += q_len
# fixing end token
lowercase__ : Dict = len(tokenizer(_lowerCamelCase , add_special_tokens=_lowerCamelCase).input_ids)
if num_sub_tokens > 1:
answer["end_token"] += num_sub_tokens - 1
lowercase__ : Dict = input_ids[answer["start_token"] : answer["end_token"] + 1] # right & left are inclusive
lowercase__ : int = answer["start_token"]
lowercase__ : int = answer["end_token"]
if assertion:
lowercase__ : List[Any] = tokenizer.decode(_lowerCamelCase)
if answer["span"] != new:
print("ISSUE IN TOKENIZATION")
print("OLD:" , answer["span"])
print("NEW:" , _lowerCamelCase , end="\n\n")
if len(_lowerCamelCase) <= max_length:
return {
"example_id": example["id"],
"input_ids": [input_ids],
"labels": {
"start_token": [answer["start_token"]],
"end_token": [answer["end_token"]],
"category": answer["category"],
},
}
lowercase__ : Optional[int] = input_ids[:q_len]
lowercase__ : List[Any] = range(_lowerCamelCase , len(_lowerCamelCase) , max_length - doc_stride)
lowercase__ : Union[str, Any] = []
lowercase__ : Any = []
lowercase__ : List[str] = []
lowercase__ : str = [] # null, yes, no, long, short
for i in doc_start_indices:
lowercase__ : List[Any] = i + max_length - q_len
lowercase__ : Dict = input_ids[i:end_index]
inputs.append(q_indices + slice)
assert len(inputs[-1]) <= max_length, "Issue in truncating length"
if start_token >= i and end_token <= end_index - 1:
lowercase__ : Union[str, Any] = start_token - i + q_len
lowercase__ : Optional[int] = end_token - i + q_len
answers_category.append(answer["category"][0]) # ["short"] -> "short"
else:
lowercase__ : int = -100
lowercase__ : str = -100
answers_category.append("null")
lowercase__ : Optional[int] = inputs[-1][start_token : end_token + 1]
answers_start_token.append(_lowerCamelCase)
answers_end_token.append(_lowerCamelCase)
if assertion:
if new != old and new != [tokenizer.cls_token_id]:
print("ISSUE in strided for ID:" , example["id"])
print("New:" , tokenizer.decode(_lowerCamelCase))
print("Old:" , tokenizer.decode(_lowerCamelCase) , end="\n\n")
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": answers_start_token,
"end_token": answers_end_token,
"category": answers_category,
},
}
def lowercase_(example, tokenizer, doc_stride=2048, max_length=4096, assertion=False):
    """Dataset.map-style wrapper: turn one raw example into strided QA inputs.

    Parameter names match the ``fn_kwargs`` used by the caller
    (``tokenizer``, ``doc_stride``, ``max_length``, ``assertion``).

    Fixes two defects in the original: every parameter shared the same name
    (a SyntaxError), and the helper's result was discarded while the
    undefined name ``example`` was returned.
    """
    # NOTE(review): get_strided_contexts_and_ans is defined elsewhere in this file.
    return get_strided_contexts_and_ans(
        example, tokenizer, doc_stride=doc_stride, max_length=max_length, assertion=assertion,
    )
def lowercase_(data, file_name):
    """Append usable QA samples from *data* to the JSON-lines file *file_name*.

    Samples with no answer span (start == end == -1) are skipped, and
    null-category samples are randomly dropped with probability 0.6 to
    rebalance the dataset.

    Fixes the original's duplicate parameter names (a SyntaxError); the
    second parameter is named ``file_name`` to match the keyword call site.
    """
    with jsonlines.open(file_name, "a") as writer:
        for example in tqdm(data, total=len(data), desc="Saving samples ... "):
            labels = example["labels"]
            for ids, start, end, cat in zip(
                example["input_ids"] , labels["start_token"] , labels["end_token"] , labels["category"] , ):
                if start == -1 and end == -1:
                    continue  # leave waste samples with no answer
                if cat == "null" and np.random.rand() < 0.6:
                    # Drop ~60% of null samples (original comment said 50%,
                    # but the threshold is 0.6).
                    continue
                writer.write(
                    {
                        "input_ids": ids,
                        "start_token": start,
                        "end_token": end,
                        "category": CATEGORY_MAPPING[cat],
                    })
# Entry point: tokenize the Natural Questions dataset with the BigBird
# tokenizer and dump strided QA samples to a jsonl file.
# NOTE(review): PROCESS_TRAIN, DOC_STRIDE, MAX_LENGTH, SEED, prepare_inputs,
# save_to_disk, data, tokenizer, fn_kwargs and cache_file_name are not
# defined in this view — presumably environment-derived constants and the
# (renamed) functions above; confirm against the full file.
if __name__ == "__main__":
    from datasets import load_dataset
    from transformers import BigBirdTokenizer
    UpperCamelCase = load_dataset('''natural_questions''')
    UpperCamelCase = BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''')
    # Pick the split according to the PROCESS_TRAIN switch.
    UpperCamelCase = data['''train''' if PROCESS_TRAIN == '''true''' else '''validation''']
    # Keyword arguments forwarded to prepare_inputs by Dataset.map.
    UpperCamelCase = {
    '''tokenizer''': tokenizer,
    '''doc_stride''': DOC_STRIDE,
    '''max_length''': MAX_LENGTH,
    '''assertion''': False,
}
    UpperCamelCase = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
    # Raw columns are no longer needed once input_ids/labels are computed.
    UpperCamelCase = data.remove_columns(['''annotations''', '''document''', '''id''', '''question'''])
    print(data)
    np.random.seed(SEED)
    UpperCamelCase = '''nq-training.jsonl''' if PROCESS_TRAIN == '''true''' else '''nq-validation.jsonl'''
    save_to_disk(data, file_name=cache_file_name)
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def lowercase_(idx):
    """Return (HF-name, original-name) pairs for stage *idx*'s patch embedding.

    Covers the projection conv and its normalization, weight and bias each.

    Fixes the original, whose parameter was named ``_lowerCamelCase`` while
    the body read the undefined name ``idx``.
    """
    src = f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings'''
    dst = f'''stage{idx}.patch_embed'''
    embed = []
    # Order matters to downstream consumers: proj.{weight,bias} then norm.{weight,bias}.
    for hf_part, orig_part in (("projection", "proj"), ("normalization", "norm")):
        for field in ("weight", "bias"):
            embed.append((f'''{src}.{hf_part}.{field}''', f'''{dst}.{orig_part}.{field}'''))
    return embed
def lowercase_(idx, cnt):
    """Return (HF-name, original-name) pairs for block *cnt* of stage *idx*.

    Covers the q/k/v convolutional projections (conv + batch-norm stats),
    the q/k/v linear projections, the attention output projection, the MLP
    and both layer norms — in the same order as the original hand-written
    list.

    Fixes the original, whose two parameters shared one name (a SyntaxError)
    while the body read the undefined names ``idx`` and ``cnt``.
    """
    hf = f'''cvt.encoder.stages.{idx}.layers.{cnt}'''
    orig = f'''stage{idx}.blocks.{cnt}'''
    attention_weights = []
    bn_fields = ("weight", "bias", "running_mean", "running_var", "num_batches_tracked")
    # 1) Depthwise conv projections for query/key/value: conv weight + BN stats.
    for proj, short in (("query", "q"), ("key", "k"), ("value", "v")):
        attention_weights.append(
            (
                f'''{hf}.attention.attention.convolution_projection_{proj}.convolution_projection.convolution.weight''',
                f'''{orig}.attn.conv_proj_{short}.conv.weight''',
            ))
        for field in bn_fields:
            attention_weights.append(
                (
                    f'''{hf}.attention.attention.convolution_projection_{proj}.convolution_projection.normalization.{field}''',
                    f'''{orig}.attn.conv_proj_{short}.bn.{field}''',
                ))
    # 2) Linear projections for query/key/value.
    for proj, short in (("query", "q"), ("key", "k"), ("value", "v")):
        for field in ("weight", "bias"):
            attention_weights.append(
                (
                    f'''{hf}.attention.attention.projection_{proj}.{field}''',
                    f'''{orig}.attn.proj_{short}.{field}''',
                ))
    # 3) Attention output projection, MLP layers, and the two layer norms.
    for hf_tail, orig_tail in (
        ("attention.output.dense", "attn.proj"),
        ("intermediate.dense", "mlp.fc1"),
        ("output.dense", "mlp.fc2"),
        ("layernorm_before", "norm1"),
        ("layernorm_after", "norm2"),
    ):
        for field in ("weight", "bias"):
            attention_weights.append((f'''{hf}.{hf_tail}.{field}''', f'''{orig}.{orig_tail}.{field}'''))
    return attention_weights
def lowercase_(idx):
    """Return the (HF-name, original-name) pair for stage *idx*'s cls token.

    The original-checkpoint name is hard-coded to ``stage2`` because only
    the last CvT stage carries a cls token (see the caller, which gates on
    ``config.cls_token[idx]``).

    Fixes the original, whose parameter was named ``_lowerCamelCase`` while
    the body read the undefined name ``idx``.
    """
    return [(f'''cvt.encoder.stages.{idx}.cls_token''', "stage2.cls_token")]
def lowercase_():
    """Return (HF-name, original-name) pairs for the final norm and classifier head."""
    return [
        ("layernorm.weight", "norm.weight"),
        ("layernorm.bias", "norm.bias"),
        ("classifier.weight", "head.weight"),
        ("classifier.bias", "head.bias"),
    ]
def lowercase_(cvt_model, image_size, cvt_file_name, pytorch_dump_folder_path):
    """Convert an original CvT checkpoint into a HF CvtForImageClassification.

    Builds the config (depths inferred from the model name), maps every
    original parameter name onto its HF counterpart, loads the remapped
    state dict and saves model + image processor to *pytorch_dump_folder_path*.

    Fixes the original's four identically-named parameters (a SyntaxError)
    and its mangled locals, which left ``model``, ``config``,
    ``original_weights`` etc. undefined; also ``int(k)`` replaces the
    original's ``int(_lowerCamelCase)`` when normalizing label keys.
    """
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1000
    repo_id = "huggingface/label-files"
    idalabel = json.load(open(cached_download(hf_hub_url(repo_id , img_labels_file , repo_type="dataset")) , "r"))
    idalabel = {int(k): v for k, v in idalabel.items()}
    config = CvtConfig(num_labels=num_labels , id2label=idalabel , label2id={v: k for k, v in idalabel.items()})
    # Stage depths are encoded in the model name: cvt-13 / cvt-21 / wide w24.
    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/" , 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/" , 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]
    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    # NOTE(review): the original assigned image_size to a throwaway local;
    # presumably it belongs on the processor — confirm intended attribute.
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name , map_location=torch.device("cpu"))
    huggingface_weights = OrderedDict()
    list_of_state_dict = []
    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx , cnt)
    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    # Remap: HF name (pair[0]) <- original checkpoint tensor (pair[1]).
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]
    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
# CLI entry point for the CvT checkpoint conversion.
# NOTE(review): the parser/args are assigned to the mangled name
# UpperCamelCase but read back as `parser`/`args`, and
# convert_cvt_checkpoint is not defined under that name in this view —
# confirm against the unmangled file.
if __name__ == "__main__":
    UpperCamelCase = argparse.ArgumentParser()
    parser.add_argument(
        '''--cvt_model''',
        default='''cvt-w24''',
        type=str,
        help='''Name of the cvt model you\'d like to convert.''',
    )
    parser.add_argument(
        '''--image_size''',
        default=384,
        type=int,
        help='''Input Image Size''',
    )
    parser.add_argument(
        '''--cvt_file_name''',
        default=R'''cvtmodels\CvT-w24-384x384-IN-22k.pth''',
        type=str,
        help='''Input Image Size''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
    )
    UpperCamelCase = parser.parse_args()
    convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-level logger for this configuration file.
UpperCamelCase = logging.get_logger(__name__)
# Map of pretrained ConvBERT checkpoints to their hosted config files.
# NOTE(review): both constants are bound to the same mangled name
# UpperCamelCase, so the second assignment shadows the first — confirm the
# intended names (logger / archive map) against the unmangled file.
UpperCamelCase = {
    '''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json''',
    '''YituTech/conv-bert-medium-small''': (
        '''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'''
    ),
    '''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json''',
    # See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class snake_case_ ( __A ):
    """Configuration for ConvBERT models (``model_type = "convbert"``).

    Fixes the original's identically-named ``lowercase_`` parameters (a
    SyntaxError) and its mangled body, which assigned every hyper-parameter
    to a throwaway local instead of storing it on the instance.
    """

    __A : List[str] = "convbert"
    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        embedding_size=768,
        head_ratio=2,
        conv_kernel_size=9,
        num_groups=1,
        classifier_dropout=None,
        **kwargs,
    ):
        """Store all hyper-parameters; special token ids go to the base config."""
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs , )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout
class snake_case_ ( __A ):
    """ONNX export configuration: declares the model's dynamic input axes."""

    @property
    def __UpperCamelCase ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
        """Map each input name to its dynamic-axis labels.

        Multiple-choice inputs carry an extra "choice" dimension between
        batch and sequence.
        """
        if self.task == "multiple-choice":
            axes = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            axes = {0: "batch", 1: "sequence"}
        # All three inputs share the same axis layout.
        return OrderedDict(
            (name, axes) for name in ("input_ids", "attention_mask", "token_type_ids") )
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy import structure: maps each Electra submodule to the public names it
# provides. Optional backends (tokenizers / torch / tf / flax) add their
# entries only when installed.
# Fixes the original's mangled rebindings — every optional branch overwrote
# the whole dict instead of adding a key — and the final _LazyModule call,
# which referenced the undefined name ``_import_structure``.
UpperCamelCase = {
    "configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
    "tokenization_electra": ["ElectraTokenizer"],
}
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase["tokenization_electra_fast"] = ["ElectraTokenizerFast"]
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase["modeling_electra"] = [
        "ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ElectraForCausalLM",
        "ElectraForMaskedLM",
        "ElectraForMultipleChoice",
        "ElectraForPreTraining",
        "ElectraForQuestionAnswering",
        "ElectraForSequenceClassification",
        "ElectraForTokenClassification",
        "ElectraModel",
        "ElectraPreTrainedModel",
        "load_tf_weights_in_electra",
    ]
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase["modeling_tf_electra"] = [
        "TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFElectraForMaskedLM",
        "TFElectraForMultipleChoice",
        "TFElectraForPreTraining",
        "TFElectraForQuestionAnswering",
        "TFElectraForSequenceClassification",
        "TFElectraForTokenClassification",
        "TFElectraModel",
        "TFElectraPreTrainedModel",
    ]
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase["modeling_flax_electra"] = [
        "FlaxElectraForCausalLM",
        "FlaxElectraForMaskedLM",
        "FlaxElectraForMultipleChoice",
        "FlaxElectraForPreTraining",
        "FlaxElectraForQuestionAnswering",
        "FlaxElectraForSequenceClassification",
        "FlaxElectraForTokenClassification",
        "FlaxElectraModel",
        "FlaxElectraPreTrainedModel",
    ]
# Static type checkers see the real imports; at runtime the module is
# replaced by a lazy proxy so heavy backends load on demand.
if TYPE_CHECKING:
    from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
    from .tokenization_electra import ElectraTokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_electra_fast import ElectraTokenizerFast
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_electra import (
            ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            ElectraForCausalLM,
            ElectraForMaskedLM,
            ElectraForMultipleChoice,
            ElectraForPreTraining,
            ElectraForQuestionAnswering,
            ElectraForSequenceClassification,
            ElectraForTokenClassification,
            ElectraModel,
            ElectraPreTrainedModel,
            load_tf_weights_in_electra,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_electra import (
            TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFElectraForMaskedLM,
            TFElectraForMultipleChoice,
            TFElectraForPreTraining,
            TFElectraForQuestionAnswering,
            TFElectraForSequenceClassification,
            TFElectraForTokenClassification,
            TFElectraModel,
            TFElectraPreTrainedModel,
        )
    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_electra import (
            FlaxElectraForCausalLM,
            FlaxElectraForMaskedLM,
            FlaxElectraForMultipleChoice,
            FlaxElectraForPreTraining,
            FlaxElectraForQuestionAnswering,
            FlaxElectraForSequenceClassification,
            FlaxElectraForTokenClassification,
            FlaxElectraModel,
            FlaxElectraPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], UpperCamelCase, module_spec=__spec__)
def lowercase_(input_1: int, input_2: int) -> int:
    """AND gate: return 1 iff both inputs are non-zero, else 0.

    Fixes the original, whose two parameters shared one name (a SyntaxError)
    while the body read the undefined name ``input_a`` twice.
    """
    # The gate is low if either input contributes a 0.
    return int((input_1, input_2).count(0) == 0)
def lowercase_():
    """Exhaustively check the AND gate's two-input truth table."""
    for left, right, expected in ((0, 0, 0), (0, 1, 0), (1, 0, 0), (1, 1, 1)):
        assert and_gate(left, right) == expected
# Demo entry point: run the truth-table self-test, then print each case.
# NOTE(review): test_and_gate and and_gate are not defined under these
# names in this view (the functions above were renamed) — confirm against
# the unmangled file.
if __name__ == "__main__":
    test_and_gate()
    print(and_gate(1, 0))
    print(and_gate(0, 0))
    print(and_gate(0, 1))
    print(and_gate(1, 1))
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case_ ( __A ,unittest.TestCase ):
    """Test suite for the LED tokenizer (slow and fast variants)."""

    # Tokenizer classes under test and the flag enabling the fast variant.
    __A : Union[str, Any] = LEDTokenizer
    __A : Union[str, Any] = LEDTokenizerFast
    __A : Optional[Any] = True
    def __UpperCamelCase ( self : List[str] ) -> Union[str, Any]:
        """Write a tiny BPE vocab + merges pair into the temp dir for the tests."""
        super().setUp()
        lowercase__ : List[str] = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        lowercase__ : Optional[int] = dict(zip(lowercase_ , range(len(lowercase_ ) ) ) )
        lowercase__ : str = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        lowercase__ : Tuple = {"unk_token": "<unk>"}
        lowercase__ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        lowercase__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(lowercase_ ) + "\n" )
        with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(lowercase_ ) )
    def __UpperCamelCase ( self : int , **lowercase_ : str ) -> List[Any]:
        """Build a slow tokenizer from the temp-dir fixture."""
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase_ )
    def __UpperCamelCase ( self : List[Any] , **lowercase_ : Any ) -> List[Any]:
        """Build a fast (Rust) tokenizer from the temp-dir fixture."""
        kwargs.update(self.special_tokens_map )
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowercase_ )
    def __UpperCamelCase ( self : str , lowercase_ : Any ) -> Tuple:
        """Return a (input, expected output) text pair for round-trip tests."""
        return "lower newer", "lower newer"
    @cached_property
    def __UpperCamelCase ( self : Tuple ) -> Optional[Any]:
        """Pretrained slow LED tokenizer used by the integration tests."""
        return LEDTokenizer.from_pretrained("allenai/led-base-16384" )
    @cached_property
    def __UpperCamelCase ( self : Tuple ) -> int:
        """Pretrained fast LED tokenizer used by the integration tests."""
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384" )
    @require_torch
    def __UpperCamelCase ( self : int ) -> List[Any]:
        """Batched encoding returns the expected ids, shape and attention mask."""
        lowercase__ : Union[str, Any] = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        lowercase__ : str = [0, 2_50, 2_51, 1_78_18, 13, 3_91_86, 19_38, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            lowercase__ : Dict = tokenizer(lowercase_ , max_length=len(lowercase_ ) , padding=lowercase_ , return_tensors="pt" )
            self.assertIsInstance(lowercase_ , lowercase_ )
            self.assertEqual((2, 9) , batch.input_ids.shape )
            self.assertEqual((2, 9) , batch.attention_mask.shape )
            lowercase__ : Union[str, Any] = batch.input_ids.tolist()[0]
            self.assertListEqual(lowercase_ , lowercase_ )
    @require_torch
    def __UpperCamelCase ( self : List[str] ) -> Tuple:
        """Plain encoding yields input_ids + attention_mask but no label keys."""
        lowercase__ : Dict = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            lowercase__ : Optional[int] = tokenizer(lowercase_ , padding=lowercase_ , return_tensors="pt" )
            self.assertIn("input_ids" , lowercase_ )
            self.assertIn("attention_mask" , lowercase_ )
            self.assertNotIn("labels" , lowercase_ )
            self.assertNotIn("decoder_attention_mask" , lowercase_ )
    @require_torch
    def __UpperCamelCase ( self : Optional[Any] ) -> Any:
        """Targets padded to max_length come back with that exact length."""
        lowercase__ : Dict = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            lowercase__ : Dict = tokenizer(text_target=lowercase_ , max_length=32 , padding="max_length" , return_tensors="pt" )
            self.assertEqual(32 , targets["input_ids"].shape[1] )
    @require_torch
    def __UpperCamelCase ( self : Optional[int] ) -> Tuple:
        """Long inputs are padded up to LED's global attention window (5122)."""
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            lowercase__ : int = tokenizer(
                ["I am a small frog" * 10_24, "I am a small frog"] , padding=lowercase_ , truncation=lowercase_ , return_tensors="pt" )
            self.assertIsInstance(lowercase_ , lowercase_ )
            self.assertEqual(batch.input_ids.shape , (2, 51_22) )
    @require_torch
    def __UpperCamelCase ( self : List[str] ) -> Any:
        """Inputs and targets are both wrapped in BOS ... EOS tokens."""
        lowercase__ : Union[str, Any] = ["A long paragraph for summarization."]
        lowercase__ : List[Any] = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            lowercase__ : List[Any] = tokenizer(lowercase_ , return_tensors="pt" )
            lowercase__ : Dict = tokenizer(text_target=lowercase_ , return_tensors="pt" )
            lowercase__ : Optional[int] = inputs["input_ids"]
            lowercase__ : str = targets["input_ids"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
    @require_torch
    def __UpperCamelCase ( self : Union[str, Any] ) -> Dict:
        """tokenizer.pad preserves a caller-supplied global_attention_mask."""
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            lowercase__ : int = ["Summary of the text.", "Another summary."]
            lowercase__ : List[str] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
            lowercase__ : Tuple = tokenizer(lowercase_ , padding=lowercase_ )
            lowercase__ : int = [[0] * len(lowercase_ ) for x in encoded_output["input_ids"]]
            lowercase__ : Any = tokenizer.pad(lowercase_ )
            self.assertSequenceEqual(outputs["global_attention_mask"] , lowercase_ )
    def __UpperCamelCase ( self : int ) -> Union[str, Any]:
        # Intentionally skipped for LED (inherited test not applicable).
        pass
    def __UpperCamelCase ( self : int ) -> Optional[Any]:
        """Slow and fast tokenizers agree on special/mask-token handling."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                lowercase__ : List[Any] = self.rust_tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
                lowercase__ : List[str] = self.tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
                lowercase__ : List[Any] = "A, <mask> AllenNLP sentence."
                lowercase__ : Tuple = tokenizer_r.encode_plus(lowercase_ , add_special_tokens=lowercase_ , return_token_type_ids=lowercase_ )
                lowercase__ : List[str] = tokenizer_p.encode_plus(lowercase_ , add_special_tokens=lowercase_ , return_token_type_ids=lowercase_ )
                self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
                self.assertEqual(
                    sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
                lowercase__ : Any = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
                lowercase__ : Optional[Any] = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
                self.assertSequenceEqual(tokens_p["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
                self.assertSequenceEqual(tokens_r["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
                self.assertSequenceEqual(
                    lowercase_ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
                self.assertSequenceEqual(
                    lowercase_ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class snake_case_ ( unittest.TestCase ,__A ):
    """Tests for the `text-classification` agent tool, local and remote."""

    def __UpperCamelCase ( self : Tuple ) -> Dict:
        """Load the local tool (with setup) and its remote counterpart."""
        lowercase__ : Optional[Any] = load_tool("text-classification" )
        self.tool.setup()
        lowercase__ : Tuple = load_tool("text-classification" , remote=lowercase_ )
    def __UpperCamelCase ( self : List[Any] ) -> str:
        """Local tool, positional arguments."""
        lowercase__ : Optional[int] = self.tool("That's quite cool" , ["positive", "negative"] )
        self.assertEqual(lowercase_ , "positive" )
    def __UpperCamelCase ( self : Optional[int] ) -> int:
        """Remote tool, positional arguments."""
        lowercase__ : Tuple = self.remote_tool("That's quite cool" , ["positive", "negative"] )
        self.assertEqual(lowercase_ , "positive" )
    def __UpperCamelCase ( self : Optional[int] ) -> Tuple:
        """Local tool, keyword arguments."""
        lowercase__ : Union[str, Any] = self.tool(text="That's quite cool" , labels=["positive", "negative"] )
        self.assertEqual(lowercase_ , "positive" )
    def __UpperCamelCase ( self : List[Any] ) -> Union[str, Any]:
        """Remote tool, keyword arguments."""
        lowercase__ : List[Any] = self.remote_tool(text="That's quite cool" , labels=["positive", "negative"] )
        self.assertEqual(lowercase_ , "positive" )
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
# Module-level logger.
UpperCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
# Fixed mel-spectrogram chunk length fed to the decoder.
# NOTE(review): both constants are bound to the same mangled name; the code
# below reads TARGET_FEATURE_LENGTH (= 256) — confirm intended names.
UpperCamelCase = 256
class snake_case_ ( __A ):
__A : str = ["melgan"]
def __init__( self : str , lowercase_ : SpectrogramNotesEncoder , lowercase_ : SpectrogramContEncoder , lowercase_ : TaFilmDecoder , lowercase_ : DDPMScheduler , lowercase_ : OnnxRuntimeModel if is_onnx_available() else Any , ) -> None:
super().__init__()
# From MELGAN
lowercase__ : List[Any] = math.log(1E-5 ) # Matches MelGAN training.
lowercase__ : str = 4.0 # Largest value for most examples
lowercase__ : Any = 1_28
self.register_modules(
notes_encoder=lowercase_ , continuous_encoder=lowercase_ , decoder=lowercase_ , scheduler=lowercase_ , melgan=lowercase_ , )
def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : List[str]=(-1.0, 1.0) , lowercase_ : Dict=False ) -> Optional[Any]:
lowercase__ , lowercase__ : int = output_range
if clip:
lowercase__ : Optional[Any] = torch.clip(lowercase_ , self.min_value , self.max_value )
# Scale to [0, 1].
lowercase__ : List[str] = (features - self.min_value) / (self.max_value - self.min_value)
# Scale to [min_out, max_out].
return zero_one * (max_out - min_out) + min_out
def __UpperCamelCase ( self : Optional[int] , lowercase_ : List[str] , lowercase_ : List[str]=(-1.0, 1.0) , lowercase_ : List[Any]=False ) -> Union[str, Any]:
lowercase__ , lowercase__ : Tuple = input_range
lowercase__ : Optional[Any] = torch.clip(lowercase_ , lowercase_ , lowercase_ ) if clip else outputs
# Scale to [0, 1].
lowercase__ : Union[str, Any] = (outputs - min_out) / (max_out - min_out)
# Scale to [self.min_value, self.max_value].
return zero_one * (self.max_value - self.min_value) + self.min_value
def __UpperCamelCase ( self : List[str] , lowercase_ : Any , lowercase_ : Optional[Any] , lowercase_ : Tuple ) -> List[str]:
lowercase__ : Optional[Any] = input_tokens > 0
lowercase__ , lowercase__ : int = self.notes_encoder(
encoder_input_tokens=lowercase_ , encoder_inputs_mask=lowercase_ )
lowercase__ , lowercase__ : List[Any] = self.continuous_encoder(
encoder_inputs=lowercase_ , encoder_inputs_mask=lowercase_ )
return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
def __UpperCamelCase ( self : Optional[Any] , lowercase_ : Optional[int] , lowercase_ : Optional[int] , lowercase_ : str ) -> Tuple:
lowercase__ : Union[str, Any] = noise_time
if not torch.is_tensor(lowercase_ ):
lowercase__ : Optional[Any] = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
elif torch.is_tensor(lowercase_ ) and len(timesteps.shape ) == 0:
lowercase__ : Optional[Any] = timesteps[None].to(input_tokens.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
lowercase__ : int = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )
lowercase__ : str = self.decoder(
encodings_and_masks=lowercase_ , decoder_input_tokens=lowercase_ , decoder_noise_time=lowercase_ )
return logits
@torch.no_grad()
def __call__(
    self,
    input_tokens: List[List[int]],
    generator: Optional[torch.Generator] = None,
    num_inference_steps: int = 100,
    return_dict: bool = True,
    output_type: str = "numpy",
    callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
    callback_steps: int = 1,
) -> Union[AudioPipelineOutput, Tuple]:
    """Generate a mel spectrogram (and optionally audio via MelGAN) from a
    sequence of note-token segments, denoising each segment with the previous
    segment's prediction as continuous context.

    Raises:
        ValueError: if `callback_steps` is not a positive integer, or if
            `output_type == "numpy"` without ONNX / a melgan component.
    """
    if (callback_steps is None) or (
        callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
    ):
        raise ValueError(
            f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
            f" {type(callback_steps)}."
        )
    pred_mel = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims], dtype=np.float32)
    full_pred_mel = np.zeros([1, 0, self.n_dims], np.float32)
    ones = torch.ones((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)
    for i, encoder_input_tokens in enumerate(input_tokens):
        if i == 0:
            encoder_continuous_inputs = torch.from_numpy(pred_mel[:1].copy()).to(
                device=self.device, dtype=self.decoder.dtype
            )
            # The first chunk has no previous context.
            encoder_continuous_mask = torch.zeros((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)
        else:
            # The full song pipeline does not feed in a context feature, so the mask
            # will be all 0s after the feature converter. Because we know we're
            # feeding in a full context chunk from the previous prediction, set it
            # to all 1s.
            encoder_continuous_mask = ones
        encoder_continuous_inputs = self.scale_features(
            encoder_continuous_inputs, output_range=[-1.0, 1.0], clip=True
        )
        encodings_and_masks = self.encode(
            input_tokens=torch.IntTensor([encoder_input_tokens]).to(device=self.device),
            continuous_inputs=encoder_continuous_inputs,
            continuous_mask=encoder_continuous_mask,
        )
        # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
        x = randn_tensor(
            shape=encoder_continuous_inputs.shape,
            generator=generator,
            device=self.device,
            dtype=self.decoder.dtype,
        )
        # set step values
        self.scheduler.set_timesteps(num_inference_steps)
        # Denoising diffusion loop
        for j, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            output = self.decode(
                encodings_and_masks=encodings_and_masks,
                input_tokens=x,
                noise_time=t / self.scheduler.config.num_train_timesteps,
            )
            # Compute previous output: x_t -> x_t-1
            x = self.scheduler.step(output, t, x, generator=generator).prev_sample
        mel = self.scale_to_features(x, input_range=[-1.0, 1.0])
        # The first frame of this prediction becomes the next segment's context.
        encoder_continuous_inputs = mel[:1]
        pred_mel = mel.cpu().float().numpy()
        full_pred_mel = np.concatenate([full_pred_mel, pred_mel[:1]], axis=1)
        # call the callback, if provided
        if callback is not None and i % callback_steps == 0:
            callback(i, full_pred_mel)
        logger.info("Generated segment", i)
    if output_type == "numpy" and not is_onnx_available():
        raise ValueError(
            "Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'."
        )
    elif output_type == "numpy" and self.melgan is None:
        raise ValueError(
            "Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'."
        )
    if output_type == "numpy":
        output = self.melgan(input_features=full_pred_mel.astype(np.float32))
    else:
        output = full_pred_mel
    if not return_dict:
        return (output,)
    return AudioPipelineOutput(audios=output)
| 87 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Map of canonical checkpoint names to their hosted config files.
CVT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
    # See all Cvt models at https://huggingface.co/models?filter=cvt
}
class CvtConfig(PretrainedConfig):
    """Configuration class for the CvT (Convolutional vision Transformer) model.

    Per-stage hyperparameters are given as 3-element lists (one entry per stage).
    Any extra keyword arguments are forwarded to `PretrainedConfig`.
    """

    model_type = "cvt"

    def __init__(
        self,
        num_channels=3,
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
| 87 | import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class ZeroShotAudioClassificationPipelineTests(unittest.TestCase):
    """Pipeline tests for zero-shot audio classification (PyTorch only)."""

    @require_torch
    def test_small_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused"
        )
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}],
        )

    @unittest.skip("No models are available in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification",
            model="laion/clap-htsat-unfused",
        )
        # This is an audio of a dog
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.999, "label": "Sound of a dog"},
                {"score": 0.001, "label": "Sound of vaccum cleaner"},
            ],
        )
        output = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )
        output = audio_classifier(
            [audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"], batch_size=5
        )
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

    @unittest.skip("No models are available in TF")
    def test_large_model_tf(self):
        pass
| 87 | 1 |
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester:
    """Builds tiny MobileBERT configs/inputs and runs per-head output-shape checks."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Random ids/masks/labels plus a config, in the order the checks expect."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MobileBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_mobilebert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mobilebert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_mobilebert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mobilebert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mobilebert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_mobilebert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MobileBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Repeat each example once per choice: (batch, seq) -> (batch, num_choices, seq).
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MobileBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common + pipeline test harness for the MobileBERT model family."""

    all_model_classes = (
        (
            MobileBertModel,
            MobileBertForMaskedLM,
            MobileBertForMultipleChoice,
            MobileBertForNextSentencePrediction,
            MobileBertForPreTraining,
            MobileBertForQuestionAnswering,
            MobileBertForSequenceClassification,
            MobileBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileBertModel,
            "fill-mask": MobileBertForMaskedLM,
            "question-answering": MobileBertForQuestionAnswering,
            "text-classification": MobileBertForSequenceClassification,
            "token-classification": MobileBertForTokenClassification,
            "zero-shot": MobileBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            # Pretraining heads additionally need MLM labels and an NSP label.
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = MobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    """Build a long tensor from a (nested) list of token ids on the shared
    test device (`torch_device`)."""
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )
UpperCamelCase = 1E-3
@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MobileBertModel.from_pretrained("google/mobilebert-uncased").to(torch_device)
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 512))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [
                    [-2.4736526e07, 8.2691656e04, 1.6521838e05],
                    [-5.7541704e-01, 3.9056022e00, 4.4011507e00],
                    [2.6047359e00, 1.5677652e00, -1.7324188e-01],
                ]
            ],
            device=torch_device,
        )
        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE)
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE)
        self.assertTrue(lower_bound and upper_bound)
| 87 | import operator
def lowercase_ ( _lowerCamelCase : list , _lowerCamelCase : bool = False , _lowerCamelCase : list | None = None):
lowercase__ : int = operator.lt if reverse else operator.gt
lowercase__ : str = solution or []
if not arr:
return solution
lowercase__ : List[str] = [arr.pop(0)]
for i, item in enumerate(_lowerCamelCase):
if _operator(_lowerCamelCase , sublist[-1]):
sublist.append(_lowerCamelCase)
arr.pop(_lowerCamelCase)
# merging sublist into solution list
if not solution:
solution.extend(_lowerCamelCase)
else:
while sublist:
lowercase__ : str = sublist.pop(0)
for i, xx in enumerate(_lowerCamelCase):
if not _operator(_lowerCamelCase , _lowerCamelCase):
solution.insert(_lowerCamelCase , _lowerCamelCase)
break
else:
solution.append(_lowerCamelCase)
strand_sort(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
| 87 | 1 |
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UnCLIPTextProjModel(ModelMixin, ConfigMixin):
    """
    Utility class for CLIP embeddings: combines the image and text embeddings
    into the time embeddings and encoder hidden states the unCLIP decoder expects.
    """

    @register_to_config
    def __init__(
        self,
        *,
        clip_extra_context_tokens: int = 4,
        clip_embeddings_dim: int = 768,
        time_embed_dim: int,
        cross_attention_dim,
    ):
        super().__init__()
        # Learned embedding substituted for the image embedding in the
        # unconditional half of classifier-free guidance.
        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))
        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)
        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim
        )
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)

    def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size, -1
            )
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)
        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]
        batch_size = prompt_embeds.shape[0]
        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds
        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens)
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)
        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)
        return text_encoder_hidden_states, additive_clip_time_embeddings
| 87 | import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
logger = logging.get_logger(__name__)

# Shared docstring injected into every criterion's __call__ via add_start_docstrings.
STOPPING_CRITERIA_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
            Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
            or scores for each vocabulary token after SoftMax.
        kwargs (`Dict[str, Any]`, *optional*):
            Additional stopping criteria specific kwargs.

    Return:
        `bool`. `False` indicates we should continue, `True` indicates we should stop.

"""
class StoppingCriteria(ABC):
    """Abstract base class for all stopping criteria applied during generation."""

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        raise NotImplementedError("StoppingCriteria needs to be subclassed")
class MaxLengthCriteria(StoppingCriteria):
    """Stop once the generated sequence reaches `max_length` tokens.

    Args:
        max_length: total sequence length (prompt + generated) at which to stop.
        max_position_embeddings: the model's positional-embedding capacity, used
            only to emit a friendly warning when generation will exceed it.
    """

    def __init__(self, max_length: int, max_position_embeddings: Optional[int] = None):
        self.max_length = max_length
        self.max_position_embeddings = max_position_embeddings

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        cur_len = input_ids.shape[-1]
        is_done = cur_len >= self.max_length
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            logger.warning_once(
                "This is a friendly reminder - the current text generation call will exceed the model's predefined "
                f"maximum length ({self.max_position_embeddings}). Depending on the model, you may observe "
                "exceptions, performance degradation, or nothing at all."
            )
        return is_done
class MaxNewTokensCriteria(StoppingCriteria):
    """Deprecated: stop after `max_new_tokens` tokens beyond `start_length`.

    Prefer `MaxLengthCriteria(max_length=start_length + max_new_tokens)`.
    """

    def __init__(self, start_length: int, max_new_tokens: int):
        warnings.warn(
            "The class `MaxNewTokensCriteria` is deprecated. "
            f"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` "
            "with `max_length = start_length + max_new_tokens` instead.",
            FutureWarning,
        )
        self.start_length = start_length
        self.max_new_tokens = max_new_tokens
        self.max_length = start_length + max_new_tokens

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return input_ids.shape[-1] >= self.max_length
class MaxTimeCriteria(StoppingCriteria):
    """Stop once generation has run for more than `max_time` seconds.

    Args:
        max_time: wall-clock budget in seconds.
        initial_timestamp: start time; defaults to construction time.
    """

    def __init__(self, max_time: float, initial_timestamp: Optional[float] = None):
        self.max_time = max_time
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return time.time() - self.initial_timestamp > self.max_time
class StoppingCriteriaList(list):
    """A list of criteria; generation stops when ANY member fires."""

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return any(criteria(input_ids, scores) for criteria in self)

    @property
    def max_length(self) -> Optional[int]:
        """The max_length of the first length-based criterion, or None."""
        for stopping_criterium in self:
            if isinstance(stopping_criterium, MaxLengthCriteria):
                return stopping_criterium.max_length
            elif isinstance(stopping_criterium, MaxNewTokensCriteria):
                return stopping_criterium.max_length
        return None
def validate_stopping_criteria(stopping_criteria: StoppingCriteriaList, max_length: int) -> StoppingCriteriaList:
    """Return a copy of `stopping_criteria` guaranteed to enforce `max_length`.

    Warns if the list already enforces a different max length; appends a
    `MaxLengthCriteria` if it enforces none. The input list is not mutated.
    """
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria)
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter", UserWarning)
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length))
    return new_stopping_criteria
| 87 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)

# PIL is an optional dependency; only import it when the vision extra is installed.
if is_vision_available():
    import PIL
class snake_case_ ( __A ):
__A : str = ["pixel_values"]
def __init__(
    self,
    do_resize: bool = True,
    size: Dict[str, int] = None,
    resample: PILImageResampling = PILImageResampling.BICUBIC,
    do_center_crop: bool = True,
    crop_size: Dict[str, int] = None,
    do_rescale: bool = True,
    rescale_factor: Union[int, float] = 1 / 255,
    do_normalize: bool = True,
    image_mean: Optional[Union[float, List[float]]] = None,
    image_std: Optional[Union[float, List[float]]] = None,
    do_convert_rgb: bool = True,
    **kwargs,
) -> None:
    """CLIP image processor: shortest-edge resize, center crop, rescale to
    [0, 1], then normalize with the OpenAI CLIP mean/std by default."""
    super().__init__(**kwargs)
    size = size if size is not None else {"shortest_edge": 224}
    # shortest_edge sizing keeps aspect ratio, hence default_to_square=False.
    size = get_size_dict(size, default_to_square=False)
    crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
    crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
    self.do_resize = do_resize
    self.size = size
    self.resample = resample
    self.do_center_crop = do_center_crop
    self.crop_size = crop_size
    self.do_rescale = do_rescale
    self.rescale_factor = rescale_factor
    self.do_normalize = do_normalize
    self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
    self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
    self.do_convert_rgb = do_convert_rgb
def __UpperCamelCase ( self : Optional[Any] , lowercase_ : np.ndarray , lowercase_ : Dict[str, int] , lowercase_ : PILImageResampling = PILImageResampling.BICUBIC , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Union[str, Any] , ) -> np.ndarray:
lowercase__ : str = get_size_dict(lowercase_ , default_to_square=lowercase_ )
if "shortest_edge" not in size:
raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
lowercase__ : Dict = get_resize_output_image_size(lowercase_ , size=size["shortest_edge"] , default_to_square=lowercase_ )
return resize(lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_ )
def __UpperCamelCase ( self : int , lowercase_ : np.ndarray , lowercase_ : Dict[str, int] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : int , ) -> np.ndarray:
lowercase__ : Optional[Any] = get_size_dict(lowercase_ )
if "height" not in size or "width" not in size:
raise ValueError(F'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(lowercase_ , size=(size["height"], size["width"]) , data_format=lowercase_ , **lowercase_ )
def __UpperCamelCase ( self : Optional[Any] , lowercase_ : np.ndarray , lowercase_ : Union[int, float] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Optional[Any] , ) -> Any:
return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_ )
def __UpperCamelCase ( self : str , lowercase_ : np.ndarray , lowercase_ : Union[float, List[float]] , lowercase_ : Union[float, List[float]] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : str , ) -> np.ndarray:
return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_ )
def __UpperCamelCase ( self : Optional[Any] , lowercase_ : ImageInput , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = None , lowercase_ : bool = None , lowercase_ : int = None , lowercase_ : bool = None , lowercase_ : float = None , lowercase_ : bool = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : bool = None , lowercase_ : Optional[Union[str, TensorType]] = None , lowercase_ : Optional[ChannelDimension] = ChannelDimension.FIRST , **lowercase_ : Union[str, Any] , ) -> PIL.Image.Image:
lowercase__ : int = do_resize if do_resize is not None else self.do_resize
lowercase__ : Dict = size if size is not None else self.size
lowercase__ : List[Any] = get_size_dict(lowercase_ , param_name="size" , default_to_square=lowercase_ )
lowercase__ : Dict = resample if resample is not None else self.resample
lowercase__ : int = do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase__ : Dict = crop_size if crop_size is not None else self.crop_size
lowercase__ : List[str] = get_size_dict(lowercase_ , param_name="crop_size" , default_to_square=lowercase_ )
lowercase__ : int = do_rescale if do_rescale is not None else self.do_rescale
lowercase__ : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase__ : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize
lowercase__ : int = image_mean if image_mean is not None else self.image_mean
lowercase__ : List[str] = image_std if image_std is not None else self.image_std
lowercase__ : Union[str, Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
lowercase__ : Union[str, Any] = make_list_of_images(lowercase_ )
if not valid_images(lowercase_ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
lowercase__ : Dict = [convert_to_rgb(lowercase_ ) for image in images]
# All transformations expect numpy arrays.
lowercase__ : Optional[Any] = [to_numpy_array(lowercase_ ) for image in images]
if do_resize:
lowercase__ : List[Any] = [self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_ ) for image in images]
if do_center_crop:
lowercase__ : int = [self.center_crop(image=lowercase_ , size=lowercase_ ) for image in images]
if do_rescale:
lowercase__ : str = [self.rescale(image=lowercase_ , scale=lowercase_ ) for image in images]
if do_normalize:
lowercase__ : Optional[int] = [self.normalize(image=lowercase_ , mean=lowercase_ , std=lowercase_ ) for image in images]
lowercase__ : Optional[Any] = [to_channel_dimension_format(lowercase_ , lowercase_ ) for image in images]
lowercase__ : List[str] = {"pixel_values": images}
return BatchFeature(data=lowercase_ , tensor_type=lowercase_ )
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def lowercase_ ( _lowerCamelCase : Dict[str, torch.Tensor]):
    # Build per-residue-type lookup tables between the dense 14-atom and the
    # sparse 37-atom protein representations, then gather them per residue.
    # NOTE(review): heavily tool-mangled. The original atom14/atom37 naming has
    # collapsed to "atomaa", every assignment targets the throwaway local
    # `lowercase__`, and many reads below (`atom_names`, `protein`,
    # `restype_atomaa_to_atomaa_list`, `protein_aatype`, ...) therefore no
    # longer resolve. `torch.intaa` / `torch.floataa` are presumably
    # torch.int32 / torch.float32 — confirm against the upstream
    # `make_atom14_masks` before relying on this function.
    lowercase__ : Any = []
    lowercase__ : Optional[int] = []
    lowercase__ : Tuple = []
    for rt in rc.restypes:
        # 14 canonical atom names for this residue type (via its 3-letter code).
        lowercase__ : Dict = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
        restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        # name -> dense index, used to invert the mapping just above.
        lowercase__ : str = {name: i for i, name in enumerate(_lowerCamelCase)}
        restype_atomaa_to_atomaa_list.append(
            [(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types])
        restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names])
    # Add dummy mapping for restype 'UNK'
    restype_atomaa_to_atomaa_list.append([0] * 14)
    restype_atomaa_to_atomaa_list.append([0] * 37)
    restype_atomaa_mask_list.append([0.0] * 14)
    # Lift the three Python lists to tensors on the same device as the input.
    lowercase__ : Union[str, Any] = torch.tensor(
        _lowerCamelCase , dtype=torch.intaa , device=protein["aatype"].device , )
    lowercase__ : str = torch.tensor(
        _lowerCamelCase , dtype=torch.intaa , device=protein["aatype"].device , )
    lowercase__ : List[str] = torch.tensor(
        _lowerCamelCase , dtype=torch.floataa , device=protein["aatype"].device , )
    lowercase__ : str = protein["aatype"].to(torch.long)
    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    lowercase__ : Dict = restype_atomaa_to_atomaa[protein_aatype]
    lowercase__ : str = restype_atomaa_mask[protein_aatype]
    lowercase__ : List[Any] = residx_atomaa_mask
    lowercase__ : Optional[Any] = residx_atomaa_to_atomaa.long()
    # create the gather indices for mapping back
    lowercase__ : str = restype_atomaa_to_atomaa[protein_aatype]
    lowercase__ : str = residx_atomaa_to_atomaa.long()
    # create the corresponding mask
    lowercase__ : Optional[Any] = torch.zeros([21, 37] , dtype=torch.floataa , device=protein["aatype"].device)
    for restype, restype_letter in enumerate(rc.restypes):
        lowercase__ : Tuple = rc.restype_atoa[restype_letter]
        lowercase__ : List[Any] = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            lowercase__ : Optional[int] = rc.atom_order[atom_name]
            # mark this (restype, atom37-index) slot as existing
            lowercase__ : Tuple = 1
    # Gather the 37-atom existence mask per residue and stash it.
    lowercase__ : Dict = restype_atomaa_mask[protein_aatype]
    lowercase__ : Any = residx_atomaa_mask
    return protein
def lowercase_ ( _lowerCamelCase : Dict[str, torch.Tensor]):
    """NumPy front-end for the atom-mask builder above.

    Converts every np.ndarray leaf of the input dict to a torch tensor (on the
    same device as ``aatype``), runs the torch implementation, and converts the
    result back to numpy.

    Bug fixed: the incoming version bound both intermediates to the dead local
    ``lowercase__`` and then executed ``return out`` -> NameError.
    """
    # NOTE(review): `make_atomaa_masks` is the torch variant defined above
    # (its def was renamed to `lowercase_` by tooling) — confirm the symbol
    # resolves in this module.
    batch = tree_map(
        lambda t: torch.tensor(t, device=_lowerCamelCase["aatype"].device), _lowerCamelCase, np.ndarray
    )
    out = tensor_tree_map(lambda t: np.array(t), make_atomaa_masks(batch))
    return out
# --- (extraction artifact removed; next section begins below) ---
def lowercase_(a: int, b: int) -> int:
    """Return the greatest common divisor of ``a`` and ``b`` (recursive Euclid).

    Bug fixed: the incoming version declared both parameters as
    ``_lowerCamelCase`` (duplicate-argument SyntaxError), read the undefined
    names ``a``/``b``, and recursed into the undefined name
    ``greatest_common_divisor``; distinct parameters and self-recursion restored.
    """
    # gcd(0, b) == |b|; otherwise recurse on (b mod a, a).
    return abs(b) if a == 0 else lowercase_(b % a, a)
def lowercase_(x: int, y: int) -> int:
    """Return gcd(x, y) by the iterative Euclidean algorithm.

    Bug fixed: the incoming version had duplicate ``_lowerCamelCase``
    parameters (SyntaxError), looped on the undefined name ``y``, and assigned
    the swap to a dead local, so the loop could never terminate correctly.
    """
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        x, y = y, x % y
    return abs(x)
def lowercase_():
    """Interactive driver: read two comma-separated integers and print both
    the recursive and the iterative GCD results.

    Bug fixed: the incoming version assigned the parsed input to dead
    ``lowercase__`` locals and then read the never-bound names ``nums`` and
    ``num_a``; it also printed the first operand twice in the f-strings.
    """
    # NOTE(review): `greatest_common_divisor` / `gcd_by_iterative` are the two
    # helpers defined above (their defs were renamed to `lowercase_` by
    # tooling) — confirm these symbols resolve in this module.
    try:
        nums = input("Enter two integers separated by comma (,): ").split(",")
        num_a = int(nums[0])
        num_b = int(nums[1])
        print(
            f'''greatest_common_divisor({num_a}, {num_b}) = '''
            f'''{greatest_common_divisor(num_a, num_b)}''')
        print(f'''By iterative gcd({num_a}, {num_b}) = {gcd_by_iterative(num_a, num_b)}''')
    except (IndexError, UnboundLocalError, ValueError):
        # Covers: missing comma (IndexError) and non-integer input (ValueError).
        print("Wrong input")
if __name__ == "__main__":
    # Bug fixed: the original invoked the undefined name `main()`; the
    # interactive driver above is bound to `lowercase_`, so call that.
    lowercase_()
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class snake_case_ ( unittest.TestCase ):
    # Config/test-input factory for the Flax BigBird tests below: stores the
    # hyperparameters, then builds a small BigBirdConfig plus random input ids
    # and attention mask.
    # NOTE(review): every parameter below was emitted as the same placeholder
    # `lowercase_` (duplicate argument names are a SyntaxError) and every
    # setter targets the dead local `lowercase__` instead of `self.<attr>`,
    # so the `self.*` reads further down no longer resolve — the original
    # distinct names (parent, batch_size, seq_length, ...) need restoring.
    def __init__( self : Tuple , lowercase_ : List[Any] , lowercase_ : Union[str, Any]=2 , lowercase_ : Union[str, Any]=56 , lowercase_ : Tuple=True , lowercase_ : Optional[Any]=True , lowercase_ : Optional[Any]=True , lowercase_ : int=True , lowercase_ : Any=99 , lowercase_ : int=32 , lowercase_ : str=2 , lowercase_ : Union[str, Any]=2 , lowercase_ : Dict=7 , lowercase_ : Dict="gelu_new" , lowercase_ : Tuple=0.1 , lowercase_ : List[Any]=0.1 , lowercase_ : Tuple=5_12 , lowercase_ : Optional[Any]=16 , lowercase_ : List[Any]=2 , lowercase_ : Dict=0.02 , lowercase_ : int=4 , lowercase_ : Tuple="block_sparse" , lowercase_ : Dict=True , lowercase_ : Optional[int]=False , lowercase_ : Dict=2 , lowercase_ : int=3 , ) -> Union[str, Any]:
        # Intended: self.parent = parent, self.batch_size = batch_size, ...
        lowercase__ : Dict = parent
        lowercase__ : Dict = batch_size
        lowercase__ : Tuple = seq_length
        lowercase__ : Dict = is_training
        lowercase__ : Dict = use_attention_mask
        lowercase__ : Tuple = use_token_type_ids
        lowercase__ : Optional[int] = use_labels
        lowercase__ : List[Any] = vocab_size
        lowercase__ : Any = hidden_size
        lowercase__ : List[Any] = num_hidden_layers
        lowercase__ : Union[str, Any] = num_attention_heads
        lowercase__ : str = intermediate_size
        lowercase__ : int = hidden_act
        lowercase__ : str = hidden_dropout_prob
        lowercase__ : List[str] = attention_probs_dropout_prob
        lowercase__ : Optional[Any] = max_position_embeddings
        lowercase__ : Union[str, Any] = type_vocab_size
        lowercase__ : Dict = type_sequence_label_size
        lowercase__ : Any = initializer_range
        lowercase__ : List[str] = num_choices
        lowercase__ : str = rescale_embeddings
        lowercase__ : Optional[Any] = attention_type
        lowercase__ : Optional[int] = use_bias
        lowercase__ : Optional[int] = block_size
        lowercase__ : str = num_random_blocks

    # Builds (config, input_ids, token_type_ids, attention_mask) with random
    # ids sized (batch_size, seq_length).
    def __UpperCamelCase ( self : str ) -> Optional[Any]:
        lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowercase__ : str = None
        if self.use_attention_mask:
            lowercase__ : Any = random_attention_mask([self.batch_size, self.seq_length] )
        lowercase__ : Optional[int] = None
        if self.use_token_type_ids:
            lowercase__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        lowercase__ : int = BigBirdConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase_ , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , )
        return config, input_ids, token_type_ids, attention_mask

    # Repackages the tuple above into the dict shape the common test mixin expects.
    # NOTE(review): this method shares the name `__UpperCamelCase` with the one
    # above, so at class-creation time it shadows it.
    def __UpperCamelCase ( self : Union[str, Any] ) -> int:
        lowercase__ : int = self.prepare_config_and_inputs()
        lowercase__ , lowercase__ , lowercase__ , lowercase__ : Dict = config_and_inputs
        lowercase__ : Union[str, Any] = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_flax
class snake_case_ ( __A ,unittest.TestCase ):
    # Flax BigBird model-test case: wires the tester above into the shared
    # FlaxModelTesterMixin machinery and overrides a few slow/attention tests.
    # NOTE(review): every method below shares the mangled name
    # `__UpperCamelCase` (only the last definition survives) and every class
    # attribute shares `__A`; the original distinct names (all_model_classes,
    # setUp, test_* ...) need restoring for this class to function.
    __A : Optional[int] = (
        (
            FlaxBigBirdForCausalLM,
            FlaxBigBirdModel,
            FlaxBigBirdForPreTraining,
            FlaxBigBirdForMaskedLM,
            FlaxBigBirdForMultipleChoice,
            FlaxBigBirdForQuestionAnswering,
            FlaxBigBirdForSequenceClassification,
            FlaxBigBirdForTokenClassification,
        )
        if is_flax_available()
        else ()
    )
    __A : List[str] = False
    __A : Any = False

    # setUp: instantiate the model tester defined above.
    # NOTE(review): `FlaxBigBirdModelTester` is presumably the mangled
    # `snake_case_` tester class — confirm the symbol resolves.
    def __UpperCamelCase ( self : List[str] ) -> List[Any]:
        lowercase__ : Union[str, Any] = FlaxBigBirdModelTester(self )

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def __UpperCamelCase ( self : Optional[int] ) -> Dict:
        super().test_from_pretrained_save_pretrained()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def __UpperCamelCase ( self : List[str] ) -> Any:
        super().test_from_pretrained_with_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def __UpperCamelCase ( self : Tuple ) -> str:
        super().test_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def __UpperCamelCase ( self : Dict ) -> Union[str, Any]:
        super().test_hidden_states_output()

    # Smoke-test: every model class loads from the published checkpoint.
    @slow
    def __UpperCamelCase ( self : Optional[int] ) -> Tuple:
        for model_class_name in self.all_model_classes:
            lowercase__ : Optional[Any] = model_class_name.from_pretrained("google/bigbird-roberta-base" )
            self.assertIsNotNone(lowercase_ )

    # Attention-output test is gated behind `test_attn_probs`.
    def __UpperCamelCase ( self : int ) -> Optional[int]:
        if self.test_attn_probs:
            super().test_attention_outputs()

    # JIT equivalence: jitted and non-jitted forward passes must produce
    # outputs with identical shapes.
    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def __UpperCamelCase ( self : str ) -> Any:
        lowercase__ , lowercase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                lowercase__ : Union[str, Any] = self._prepare_for_class(lowercase_ , lowercase_ )
                lowercase__ : Optional[Any] = model_class(lowercase_ )

                @jax.jit
                def model_jitted(lowercase_ : Tuple , lowercase_ : int=None , **lowercase_ : Dict ):
                    return model(input_ids=lowercase_ , attention_mask=lowercase_ , **lowercase_ )

                with self.subTest("JIT Enabled" ):
                    lowercase__ : int = model_jitted(**lowercase_ ).to_tuple()
                with self.subTest("JIT Disabled" ):
                    with jax.disable_jit():
                        lowercase__ : Any = model_jitted(**lowercase_ ).to_tuple()
                self.assertEqual(len(lowercase_ ) , len(lowercase_ ) )
                for jitted_output, output in zip(lowercase_ , lowercase_ ):
                    self.assertEqual(jitted_output.shape , output.shape )

    # PT/Flax output comparison skips attention tensors: the block-sparse Flax
    # attention returns `attention_probs = None`, unlike the PyTorch version.
    def __UpperCamelCase ( self : List[Any] , lowercase_ : str , lowercase_ : Union[str, Any] , lowercase_ : Optional[int] , lowercase_ : List[Any]=1E-5 , lowercase_ : Any="outputs" , lowercase_ : List[str]=None ) -> List[Any]:
        # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version,
        # an effort was done to return `attention_probs` (yet to be verified).
        if name.startswith("outputs.attentions" ):
            return
        else:
            super().check_pt_flax_outputs(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
# --- (extraction artifact removed; next section begins below) ---
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available

# Lazy-import table: submodule name -> public names it exports.
# Bug fixed: the incoming version bound this dict to `UpperCamelCase`, then
# rebound `UpperCamelCase` to each framework list (discarding the dict), and
# finally passed the never-defined name `_import_structure` to `_LazyModule`.
_import_structure = {
    "configuration_groupvit": [
        "GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "GroupViTConfig",
        "GroupViTOnnxConfig",
        "GroupViTTextConfig",
        "GroupViTVisionConfig",
    ],
}

# PyTorch models are only exported when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_groupvit"] = [
        "GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GroupViTModel",
        "GroupViTPreTrainedModel",
        "GroupViTTextModel",
        "GroupViTVisionModel",
    ]

# TensorFlow models are only exported when TF is installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_groupvit"] = [
        "TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFGroupViTModel",
        "TFGroupViTPreTrainedModel",
        "TFGroupViTTextModel",
        "TFGroupViTVisionModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime they are deferred.
    from .configuration_groupvit import (
        GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        GroupViTConfig,
        GroupViTOnnxConfig,
        GroupViTTextConfig,
        GroupViTVisionConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_groupvit import (
            GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GroupViTModel,
            GroupViTPreTrainedModel,
            GroupViTTextModel,
            GroupViTVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_groupvit import (
            TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFGroupViTModel,
            TFGroupViTPreTrainedModel,
            TFGroupViTTextModel,
            TFGroupViTVisionModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy framework imports are
    # deferred until an attribute is actually accessed.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available

# Lazy-import table: submodule name -> public names it exports.
# Bug fixed: the incoming version bound this dict to `UpperCamelCase`, then
# rebound `UpperCamelCase` to each framework list (discarding the dict), and
# finally passed the never-defined name `_import_structure` to `_LazyModule`.
_import_structure = {
    "configuration_groupvit": [
        "GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "GroupViTConfig",
        "GroupViTOnnxConfig",
        "GroupViTTextConfig",
        "GroupViTVisionConfig",
    ],
}

# PyTorch models are only exported when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_groupvit"] = [
        "GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GroupViTModel",
        "GroupViTPreTrainedModel",
        "GroupViTTextModel",
        "GroupViTVisionModel",
    ]

# TensorFlow models are only exported when TF is installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_groupvit"] = [
        "TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFGroupViTModel",
        "TFGroupViTPreTrainedModel",
        "TFGroupViTTextModel",
        "TFGroupViTVisionModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime they are deferred.
    from .configuration_groupvit import (
        GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        GroupViTConfig,
        GroupViTOnnxConfig,
        GroupViTTextConfig,
        GroupViTVisionConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_groupvit import (
            GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GroupViTModel,
            GroupViTPreTrainedModel,
            GroupViTTextModel,
            GroupViTVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_groupvit import (
            TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFGroupViTModel,
            TFGroupViTPreTrainedModel,
            TFGroupViTTextModel,
            TFGroupViTVisionModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy framework imports are
    # deferred until an attribute is actually accessed.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# --- (extraction artifact removed; next section begins below) ---
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Lazy-import table: submodule name -> public names it exports.
# Bug fixed: the incoming version bound this dict to `UpperCamelCase`, then
# rebound `UpperCamelCase` to each optional-dependency list (discarding the
# dict), and finally passed the never-defined name `_import_structure` to
# `_LazyModule`.
_import_structure = {
    "configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
    "tokenization_electra": ["ElectraTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_electra"] = [
        "ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ElectraForCausalLM",
        "ElectraForMaskedLM",
        "ElectraForMultipleChoice",
        "ElectraForPreTraining",
        "ElectraForQuestionAnswering",
        "ElectraForSequenceClassification",
        "ElectraForTokenClassification",
        "ElectraModel",
        "ElectraPreTrainedModel",
        "load_tf_weights_in_electra",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_electra"] = [
        "TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFElectraForMaskedLM",
        "TFElectraForMultipleChoice",
        "TFElectraForPreTraining",
        "TFElectraForQuestionAnswering",
        "TFElectraForSequenceClassification",
        "TFElectraForTokenClassification",
        "TFElectraModel",
        "TFElectraPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_electra"] = [
        "FlaxElectraForCausalLM",
        "FlaxElectraForMaskedLM",
        "FlaxElectraForMultipleChoice",
        "FlaxElectraForPreTraining",
        "FlaxElectraForQuestionAnswering",
        "FlaxElectraForSequenceClassification",
        "FlaxElectraForTokenClassification",
        "FlaxElectraModel",
        "FlaxElectraPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime they are deferred.
    from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
    from .tokenization_electra import ElectraTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_electra_fast import ElectraTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_electra import (
            ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            ElectraForCausalLM,
            ElectraForMaskedLM,
            ElectraForMultipleChoice,
            ElectraForPreTraining,
            ElectraForQuestionAnswering,
            ElectraForSequenceClassification,
            ElectraForTokenClassification,
            ElectraModel,
            ElectraPreTrainedModel,
            load_tf_weights_in_electra,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_electra import (
            TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFElectraForMaskedLM,
            TFElectraForMultipleChoice,
            TFElectraForPreTraining,
            TFElectraForQuestionAnswering,
            TFElectraForSequenceClassification,
            TFElectraForTokenClassification,
            TFElectraModel,
            TFElectraPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_electra import (
            FlaxElectraForCausalLM,
            FlaxElectraForMaskedLM,
            FlaxElectraForMultipleChoice,
            FlaxElectraForPreTraining,
            FlaxElectraForQuestionAnswering,
            FlaxElectraForSequenceClassification,
            FlaxElectraForTokenClassification,
            FlaxElectraModel,
            FlaxElectraPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy framework imports are
    # deferred until an attribute is actually accessed.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_json_dataset(dataset, expected_features):
    """Shared assertions for a Dataset read from the 4-row JSON fixture.

    Bug fixed: the incoming def was named `lowercase_` with two parameters both
    named `_lowerCamelCase` (SyntaxError) while call sites in this file invoke
    `_check_json_dataset(dataset, expected_features)` — names restored to match.
    """
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True])
def lowercase_(keep_in_memory, jsonl_path, tmp_path):
    """A Dataset reads from one JSON-lines file, optionally fully in memory.

    Bug fixed: duplicate `_lowerCamelCase` parameters (SyntaxError) and results
    bound to dead `lowercase__` locals; fixture names restored from usage.
    """
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    # Memory must grow iff keep_in_memory was requested.
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    "features" , [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ] , )
def lowercase_(features, jsonl_path, tmp_path):
    """An explicit `features` schema (or None for inference) is honoured.

    Bug fixed: duplicate `_lowerCamelCase` parameters and dead `lowercase__`
    locals; `Value(_lowerCamelCase)` restored to `Value(dtype)`.
    """
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_path, features=features, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    "features" , [
        None,
        {"col_3": "float64", "col_1": "string", "col_2": "int64"},
    ] , )
def lowercase_(features, jsonl_312_path, tmp_path):
    """Column order from the file (col_3 first) is preserved, with optional cast.

    Bug fixed: duplicate parameters and dead `lowercase__` locals restored
    from the assertions below (2-row, col_3-first fixture => jsonl_312_path).
    """
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
def lowercase_(jsonl_312_path, tmp_path):
    """Requesting a feature order different from the file reorders the columns.

    Bug fixed: duplicate parameters and dead `lowercase__` locals restored;
    the `features` dict here is a literal, so the original
    `if features is not None else None` guard was always true and is dropped.
    """
    # jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    features = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
    expected_features = features.copy()
    features = Features({feature: Value(dtype) for feature, dtype in features.items()})
    cache_dir = tmp_path / "cache"
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("split" , [None, NamedSplit("train"), "train", "test"])
def lowercase_(split, jsonl_path, tmp_path):
    """The requested split name is applied; None defaults to "train".

    Bug fixed: duplicate parameters / dead locals restored; the final assert
    previously parsed as `assert (...) if split else "train"`, which was
    vacuously true for split=None — parenthesized to test the intended value.
    """
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, split=split).read()
    _check_json_dataset(dataset, expected_features)
    assert dataset.split == (split if split else "train")
@pytest.mark.parametrize("path_type" , [str, list])
def lowercase_(path_type, jsonl_path, tmp_path):
    """Reading accepts either a single path or a list of paths.

    Bug fixed: duplicate `_lowerCamelCase` parameters and dead `lowercase__`
    locals; the `issubclass` branches now test `path_type` and both bind `path`.
    """
    if issubclass(path_type, str):
        path = jsonl_path
    elif issubclass(path_type, list):
        path = [jsonl_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)
def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
    """Shared assertions for a DatasetDict read from the 4-row JSON fixture.

    Bug fixed: the incoming def was named `lowercase_` with duplicate
    parameters, while call sites in this file invoke `_check_json_datasetdict`
    — names restored to match.
    """
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True])
def lowercase_(keep_in_memory, jsonl_path, tmp_path):
    """A DatasetDict reads from a {split: path} mapping, optionally in memory.

    Bug fixed: duplicate `_lowerCamelCase` parameters (SyntaxError) and results
    bound to dead `lowercase__` locals; fixture names restored from usage.
    """
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader({"train": jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
    "features" , [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ] , )
def lowercase_(features, jsonl_path, tmp_path):
    """An explicit `features` schema (or None) is honoured for a DatasetDict.

    Bug fixed: duplicate `_lowerCamelCase` parameters and dead `lowercase__`
    locals; `Value(_lowerCamelCase)` restored to `Value(dtype)`.
    """
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader({"train": jsonl_path}, features=features, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split" , [None, NamedSplit("train"), "train", "test"])
def lowercase_(split, jsonl_path, tmp_path):
    """A {split: path} mapping yields a DatasetDict with matching split names.

    Bug fixed: duplicate `_lowerCamelCase` parameters (SyntaxError) and every
    binding redirected to dead `lowercase__` locals, so `path` was never set.
    """
    if split:
        path = {split: jsonl_path}
    else:
        # No split requested: read both a train and a test split.
        split = "train"
        path = {"train": jsonl_path, "test": jsonl_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def load_json(buffer):
    """Parse one JSON document from an open file-like object.

    Renamed from the mangled `lowercase_`: the parametrize decorator below
    references this function by the name `load_json`.
    """
    return json.load(buffer)
def load_json_lines(buffer):
    """Parse a JSON-lines stream: one JSON document per line.

    Renamed from the mangled `lowercase_` (referenced as `load_json_lines` by
    the parametrize decorator below). Bug fixed: the comprehension previously
    decoded the whole buffer object for every line (`json.loads(_lowerCamelCase)`)
    instead of decoding each `line`.
    """
    return [json.loads(line) for line in buffer]
class snake_case_ :
    """Round-trip tests for ``JsonDatasetWriter``: export a dataset to JSON
    (line-delimited or pandas-style orients, optionally multi-process or
    compressed) and verify the exported payload.

    NOTE(review): obfuscation damage kept byte-identical below — several
    ``def`` lines repeat the parameter name ``lowercase_`` (duplicate argument
    names are a SyntaxError), bodies assign mangled locals but read original
    names (``exported_content``, ``keys``, ``len_at``, ``extension`` ...), and
    the parametrize decorators reference ``load_json_lines`` / ``load_json``
    which this file defines under the mangled name ``lowercase_``. The class
    needs de-mangling before it can run; only comments were added here.
    """
    # lines=True -> JSON Lines parsed line-by-line; lines=False -> one document.
    @pytest.mark.parametrize("lines, load_json_function" , [(True, load_json_lines), (False, load_json)] )
    def __UpperCamelCase ( self : List[Any] , lowercase_ : Any , lowercase_ : Optional[Any] , lowercase_ : Dict ) -> Optional[Any]:
        # Write to an in-memory buffer, rewind, then parse the export back.
        with io.BytesIO() as buffer:
            JsonDatasetWriter(lowercase_ , lowercase_ , lines=lowercase_ ).write()
            buffer.seek(0 )
            lowercase__ : Optional[int] = load_json_function(lowercase_ )
        # The fixture dataset is expected to contain exactly 10 rows.
        assert isinstance(lowercase_ , lowercase_ )
        assert isinstance(exported_content[0] , lowercase_ )
        assert len(lowercase_ ) == 10
    # Each pandas-style orient produces a characteristic container/key layout.
    @pytest.mark.parametrize(
        "orient, container, keys, len_at" , [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789" ), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ] , )
    def __UpperCamelCase ( self : str , lowercase_ : int , lowercase_ : str , lowercase_ : Union[str, Any] , lowercase_ : Dict , lowercase_ : Tuple ) -> List[str]:
        with io.BytesIO() as buffer:
            JsonDatasetWriter(lowercase_ , lowercase_ , lines=lowercase_ , orient=lowercase_ ).write()
            buffer.seek(0 )
            lowercase__ : str = load_json(lowercase_ )
        assert isinstance(lowercase_ , lowercase_ )
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            # "values" orient: bare lists, no mapping keys anywhere.
            assert not hasattr(lowercase_ , "keys" ) and not hasattr(exported_content[0] , "keys" )
        if len_at:
            # e.g. orient="split" stores the 10 rows under the "data" key.
            assert len(exported_content[len_at] ) == 10
        else:
            assert len(lowercase_ ) == 10
    # Same JSON-lines round trip, but exercising the num_proc=2 writer path.
    @pytest.mark.parametrize("lines, load_json_function" , [(True, load_json_lines), (False, load_json)] )
    def __UpperCamelCase ( self : List[Any] , lowercase_ : int , lowercase_ : Optional[Any] , lowercase_ : Dict ) -> Optional[int]:
        with io.BytesIO() as buffer:
            JsonDatasetWriter(lowercase_ , lowercase_ , lines=lowercase_ , num_proc=2 ).write()
            buffer.seek(0 )
            lowercase__ : str = load_json_function(lowercase_ )
        assert isinstance(lowercase_ , lowercase_ )
        assert isinstance(exported_content[0] , lowercase_ )
        assert len(lowercase_ ) == 10
    # Orient matrix again, with multiprocessing enabled.
    @pytest.mark.parametrize(
        "orient, container, keys, len_at" , [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789" ), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ] , )
    def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : Dict , lowercase_ : Dict , lowercase_ : Optional[int] , lowercase_ : Optional[Any] , lowercase_ : Dict ) -> Any:
        with io.BytesIO() as buffer:
            JsonDatasetWriter(lowercase_ , lowercase_ , lines=lowercase_ , orient=lowercase_ , num_proc=2 ).write()
            buffer.seek(0 )
            lowercase__ : Optional[Any] = load_json(lowercase_ )
        assert isinstance(lowercase_ , lowercase_ )
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(lowercase_ , "keys" ) and not hasattr(exported_content[0] , "keys" )
        if len_at:
            assert len(exported_content[len_at] ) == 10
        else:
            assert len(lowercase_ ) == 10
    # num_proc must be a positive integer; constructing with 0 should raise.
    def __UpperCamelCase ( self : Dict , lowercase_ : List[str] ) -> str:
        with pytest.raises(lowercase_ ):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(lowercase_ , lowercase_ , num_proc=0 )
    # Write with each codec and byte-compare against a pre-built fixture file;
    # fsspec's compression="infer" decompresses both sides from the extension.
    @pytest.mark.parametrize("compression, extension" , [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")] )
    def __UpperCamelCase ( self : List[Any] , lowercase_ : Tuple , lowercase_ : List[str] , lowercase_ : Optional[Any] , lowercase_ : str , lowercase_ : List[Any] ) -> Any:
        lowercase__ : Dict = tmp_path_factory.mktemp("data" ) / F'''test.json.{extension}'''
        lowercase__ : Optional[int] = str(shared_datadir / F'''test_file.json.{extension}''' )
        JsonDatasetWriter(lowercase_ , lowercase_ , compression=lowercase_ ).write()
        with fsspec.open(lowercase_ , "rb" , compression="infer" ) as f:
            lowercase__ : List[Any] = f.read()
        with fsspec.open(lowercase_ , "rb" , compression="infer" ) as f:
            lowercase__ : str = f.read()
        assert exported_content == original_content
# --- concatenation artifact ("| 87 | 1 |"): boundary between source files ---
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
# Soft-import the HTTP serving stack; fall back to inert stubs when missing.
try:
    from fastapi import Body, FastAPI, HTTPException
    from fastapi.routing import APIRoute
    from pydantic import BaseModel
    from starlette.responses import JSONResponse
    from uvicorn import run
    UpperCamelCase = True
except (ImportError, AttributeError):
    # NOTE(review): obfuscation collapsed several distinct globals (the
    # "dependencies installed" flag, the BaseModel stand-in and the Body stub)
    # into the single names ``UpperCamelCase`` / ``lowercase_`` — each
    # assignment below clobbers the previous one, so the fallbacks are lost.
    UpperCamelCase = object
    def lowercase_ ( *_lowerCamelCase : Union[str, Any] , **_lowerCamelCase : List[str]):
        # No-op stand-in for fastapi.Body when FastAPI is not installed.
        pass
    UpperCamelCase = False
# Module-level logger for the serving CLI command.
UpperCamelCase = logging.get_logger('''transformers-cli/serving''')
def lowercase_(args: Namespace):
    """Factory for the ``serve`` sub-command: build the requested pipeline and
    wrap it in a ``ServeCommand``.

    Args:
        args: parsed CLI namespace (task, model, config, tokenizer, device,
            host, port, workers).

    Fixes (obfuscation damage): the original took a parameter it never used and
    read an undefined global ``args`` instead.
    """
    nlp = pipeline(
        task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
    return ServeCommand(nlp , args.host , args.port , args.workers)
class snake_case_ ( __A ):
    # Response model for GET "/": a free-form dict of the model configuration.
    # NOTE(review): the field name was mangled by obfuscation; confirm against
    # the handler that constructs it with ``infos=...``.
    __A : dict
class snake_case_ ( __A ):
    # Response model for POST /tokenize: token strings plus optional token ids.
    # NOTE(review): field names mangled by obfuscation; the handler constructs
    # it with ``tokens=...`` and ``tokens_ids=...``.
    __A : List[str]
    __A : Optional[List[int]]
class snake_case_ ( __A ):
    # Response model for POST /detokenize: the decoded text (handler passes
    # ``model=`` and ``text=``; field names were mangled by obfuscation).
    __A : str
class snake_case_ ( __A ):
    # Response model for POST /forward: the raw pipeline output (any shape).
    __A : Any
class snake_case_ ( __A ):
    """CLI command serving a transformers ``Pipeline`` over a FastAPI REST API
    with four endpoints: ``/`` (model info), ``/tokenize``, ``/detokenize``
    and ``/forward``.

    NOTE(review): obfuscation damage kept byte-identical below — ``__init__``
    repeats the parameter name ``lowercase_`` (duplicate argument names are a
    SyntaxError), every handler is named ``__UpperCamelCase`` while the
    APIRoute declarations reference ``self.model_info`` / ``self.tokenize`` /
    ``self.detokenize`` / ``self.forward``, and bodies assign mangled locals
    (``lowercase__``) but read the original names (``serve_parser``,
    ``pipeline``, ``return_ids`` ...). Needs de-mangling to run.
    """
    @staticmethod
    def __UpperCamelCase ( lowercase_ : ArgumentParser ) -> Union[str, Any]:
        # Register the ``serve`` sub-command and its CLI flags on the root parser.
        lowercase__ : Union[str, Any] = parser.add_parser(
            "serve" , help="CLI tool to run inference requests through REST and GraphQL endpoints." )
        serve_parser.add_argument(
            "--task" , type=lowercase_ , choices=get_supported_tasks() , help="The task to run the pipeline on" , )
        serve_parser.add_argument("--host" , type=lowercase_ , default="localhost" , help="Interface the server will listen on." )
        serve_parser.add_argument("--port" , type=lowercase_ , default=88_88 , help="Port the serving will listen to." )
        serve_parser.add_argument("--workers" , type=lowercase_ , default=1 , help="Number of http workers" )
        serve_parser.add_argument("--model" , type=lowercase_ , help="Model's name or path to stored model." )
        serve_parser.add_argument("--config" , type=lowercase_ , help="Model's config name or path to stored model." )
        serve_parser.add_argument("--tokenizer" , type=lowercase_ , help="Tokenizer name to use." )
        serve_parser.add_argument(
            "--device" , type=lowercase_ , default=-1 , help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)" , )
        serve_parser.set_defaults(func=lowercase_ )
    def __init__( self : int , lowercase_ : Pipeline , lowercase_ : str , lowercase_ : int , lowercase_ : int ) -> Dict:
        lowercase__ : Dict = pipeline
        lowercase__ : Any = host
        lowercase__ : List[str] = port
        lowercase__ : List[Any] = workers
        # Fail fast when the optional serving dependencies were not importable.
        if not _serve_dependencies_installed:
            raise RuntimeError(
                "Using serve command requires FastAPI and uvicorn. "
                "Please install transformers with [serving]: pip install \"transformers[serving]\"."
                "Or install FastAPI and uvicorn separately." )
        else:
            logger.info(F'''Serving model over {host}:{port}''' )
            # Wire each HTTP route to its handler method.
            lowercase__ : Optional[Any] = FastAPI(
                routes=[
                    APIRoute(
                        "/" , self.model_info , response_model=lowercase_ , response_class=lowercase_ , methods=["GET"] , ),
                    APIRoute(
                        "/tokenize" , self.tokenize , response_model=lowercase_ , response_class=lowercase_ , methods=["POST"] , ),
                    APIRoute(
                        "/detokenize" , self.detokenize , response_model=lowercase_ , response_class=lowercase_ , methods=["POST"] , ),
                    APIRoute(
                        "/forward" , self.forward , response_model=lowercase_ , response_class=lowercase_ , methods=["POST"] , ),
                ] , timeout=6_00 , )
    def __UpperCamelCase ( self : int ) -> List[str]:
        # Start the uvicorn server (blocking call).
        run(self._app , host=self.host , port=self.port , workers=self.workers )
    def __UpperCamelCase ( self : List[Any] ) -> Optional[int]:
        # GET / — expose the model configuration as a plain dict.
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config ) )
    def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : str = Body(lowercase_ , embed=lowercase_ ) , lowercase_ : bool = Body(lowercase_ , embed=lowercase_ ) ) -> Optional[Any]:
        # POST /tokenize — tokenize text; optionally also return the token ids.
        try:
            lowercase__ : Union[str, Any] = self._pipeline.tokenizer.tokenize(lowercase_ )
            if return_ids:
                lowercase__ : Union[str, Any] = self._pipeline.tokenizer.convert_tokens_to_ids(lowercase_ )
                return ServeTokenizeResult(tokens=lowercase_ , tokens_ids=lowercase_ )
            else:
                return ServeTokenizeResult(tokens=lowercase_ )
        except Exception as e:
            raise HTTPException(status_code=5_00 , detail={"model": "", "error": str(lowercase_ )} )
    def __UpperCamelCase ( self : Optional[int] , lowercase_ : List[int] = Body(lowercase_ , embed=lowercase_ ) , lowercase_ : bool = Body(lowercase_ , embed=lowercase_ ) , lowercase_ : bool = Body(lowercase_ , embed=lowercase_ ) , ) -> Optional[int]:
        # POST /detokenize — decode token ids back into a string.
        try:
            lowercase__ : str = self._pipeline.tokenizer.decode(lowercase_ , lowercase_ , lowercase_ )
            return ServeDeTokenizeResult(model="" , text=lowercase_ )
        except Exception as e:
            raise HTTPException(status_code=5_00 , detail={"model": "", "error": str(lowercase_ )} )
    async def __UpperCamelCase ( self : Tuple , lowercase_ : int=Body(lowercase_ , embed=lowercase_ ) ) -> List[Any]:
        # POST /forward — run the pipeline on the posted inputs.
        # Check we don't have empty string
        if len(lowercase_ ) == 0:
            return ServeForwardResult(output=[] , attention=[] )
        try:
            # Forward through the model
            lowercase__ : int = self._pipeline(lowercase_ )
            return ServeForwardResult(output=lowercase_ )
        except Exception as e:
            raise HTTPException(5_00 , {"error": str(lowercase_ )} )
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class snake_case_ ( __A ):
    """Processor that pairs a LayoutLMv3 image processor (OCR + pixel values)
    with a LayoutLMv3 tokenizer (text, boxes, word labels) to produce
    model-ready encodings.

    NOTE(review): obfuscation damage kept byte-identical below — ``__init__``
    and ``__call__`` repeat the parameter name ``lowercase_`` (duplicate
    argument names are a SyntaxError) and the bodies read the original names
    (``kwargs``, ``image_processor``, ``tokenizer``, ``boxes`` ...) that the
    mangled signatures no longer bind. Needs de-mangling to run.
    """
    __A : Optional[Any] = ["image_processor", "tokenizer"]
    __A : Tuple = "LayoutLMv3ImageProcessor"
    __A : List[Any] = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")
    def __init__( self : Union[str, Any] , lowercase_ : int=None , lowercase_ : str=None , **lowercase_ : Optional[Any] ) -> Optional[int]:
        lowercase__ : Union[str, Any] = None
        # Back-compat: accept (and warn about) the deprecated `feature_extractor` kwarg.
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , lowercase_ , )
            lowercase__ : Optional[int] = kwargs.pop("feature_extractor" )
        lowercase__ : int = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(lowercase_ , lowercase_ )
    def __call__( self : Dict , lowercase_ : Optional[Any] , lowercase_ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , lowercase_ : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , lowercase_ : Union[List[List[int]], List[List[List[int]]]] = None , lowercase_ : Optional[Union[List[int], List[List[int]]]] = None , lowercase_ : bool = True , lowercase_ : Union[bool, str, PaddingStrategy] = False , lowercase_ : Union[bool, str, TruncationStrategy] = None , lowercase_ : Optional[int] = None , lowercase_ : int = 0 , lowercase_ : Optional[int] = None , lowercase_ : Optional[bool] = None , lowercase_ : Optional[bool] = None , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : bool = True , lowercase_ : Optional[Union[str, TensorType]] = None , **lowercase_ : Dict , ) -> BatchEncoding:
        # verify input
        # When the image processor runs OCR itself, user-supplied boxes/labels
        # would conflict with the OCR output — reject them explicitly.
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True." )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True." )
        # first, apply the image processor
        lowercase__ : Union[str, Any] = self.image_processor(images=lowercase_ , return_tensors=lowercase_ )
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(lowercase_ , lowercase_ ):
                lowercase__ : Optional[Any] = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            lowercase__ : Any = features["words"]
        lowercase__ : Tuple = self.tokenizer(
            text=text if text is not None else features["words"] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["boxes"] , word_labels=lowercase_ , add_special_tokens=lowercase_ , padding=lowercase_ , truncation=lowercase_ , max_length=lowercase_ , stride=lowercase_ , pad_to_multiple_of=lowercase_ , return_token_type_ids=lowercase_ , return_attention_mask=lowercase_ , return_overflowing_tokens=lowercase_ , return_special_tokens_mask=lowercase_ , return_offsets_mapping=lowercase_ , return_length=lowercase_ , verbose=lowercase_ , return_tensors=lowercase_ , **lowercase_ , )
        # add pixel values
        lowercase__ : Optional[int] = features.pop("pixel_values" )
        if return_overflowing_tokens is True:
            lowercase__ : Dict = self.get_overflowing_images(lowercase_ , encoded_inputs["overflow_to_sample_mapping"] )
        lowercase__ : str = images
        return encoded_inputs
    def __UpperCamelCase ( self : List[Any] , lowercase_ : Tuple , lowercase_ : List[Any] ) -> Dict:
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        lowercase__ : Tuple = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx] )
        if len(lowercase_ ) != len(lowercase_ ):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                F''' {len(lowercase_ )} and {len(lowercase_ )}''' )
        return images_with_overflow
    def __UpperCamelCase ( self : int , *lowercase_ : Union[str, Any] , **lowercase_ : List[str] ) -> Union[str, Any]:
        # Delegate to the tokenizer's batch_decode.
        return self.tokenizer.batch_decode(*lowercase_ , **lowercase_ )
    def __UpperCamelCase ( self : Union[str, Any] , *lowercase_ : str , **lowercase_ : int ) -> Dict:
        # Delegate to the tokenizer's decode.
        return self.tokenizer.decode(*lowercase_ , **lowercase_ )
    @property
    def __UpperCamelCase ( self : Any ) -> Any:
        # Names of the inputs this processor produces for the model.
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]
    @property
    def __UpperCamelCase ( self : Optional[int] ) -> Optional[Any]:
        # Deprecated alias kept for backward compatibility.
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , lowercase_ , )
        return self.image_processor_class
    @property
    def __UpperCamelCase ( self : List[Any] ) -> Tuple:
        # Deprecated alias kept for backward compatibility.
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , lowercase_ , )
        return self.image_processor
# --- concatenation artifact ("| 87 | 1 |"): boundary between source files ---
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class snake_case_ ( __A ):
    """Tests for ``UnCLIPScheduler``: config sweeps, variance values for both
    variance types, and full / truncated denoising loops with regression
    targets on the produced samples.

    NOTE(review): obfuscation damage kept byte-identical below — bodies assign
    mangled locals (``lowercase__``) but read the original names (``config``,
    ``scheduler``, ``timesteps``, ``model``, ``sample`` ...). Only comments
    were added.
    """
    __A : List[Any] = (UnCLIPScheduler,)
    def __UpperCamelCase ( self : Union[str, Any] , **lowercase_ : List[str] ) -> List[str]:
        # Default scheduler config; keyword overrides are merged on top.
        lowercase__ : int = {
            "num_train_timesteps": 10_00,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }
        config.update(**lowercase_ )
        return config
    def __UpperCamelCase ( self : Tuple ) -> Tuple:
        for timesteps in [1, 5, 1_00, 10_00]:
            self.check_over_configs(num_train_timesteps=lowercase_ )
    def __UpperCamelCase ( self : Optional[int] ) -> Union[str, Any]:
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=lowercase_ )
    def __UpperCamelCase ( self : int ) -> List[Any]:
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=lowercase_ )
    def __UpperCamelCase ( self : str ) -> int:
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=lowercase_ )
    def __UpperCamelCase ( self : int ) -> Optional[int]:
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=lowercase_ )
    def __UpperCamelCase ( self : str ) -> Union[str, Any]:
        # prev_timestep must precede time_step; skip invalid combinations.
        for time_step in [0, 5_00, 9_99]:
            for prev_timestep in [None, 5, 1_00, 2_50, 5_00, 7_50]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue
                self.check_over_forward(time_step=lowercase_ , prev_timestep=lowercase_ )
    def __UpperCamelCase ( self : Union[str, Any] ) -> int:
        # Regression targets for the "fixed_small_log" variance schedule.
        lowercase__ : Dict = self.scheduler_classes[0]
        lowercase__ : Optional[Any] = self.get_scheduler_config(variance_type="fixed_small_log" )
        lowercase__ : Optional[int] = scheduler_class(**lowercase_ )
        assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.00_00E-10 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(4_87 ) - 0.0_54_96_25 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(9_99 ) - 0.9_99_49_87 ) ) < 1E-5
    def __UpperCamelCase ( self : Union[str, Any] ) -> Optional[Any]:
        # Regression targets for the "learned_range" variance schedule.
        lowercase__ : Dict = self.scheduler_classes[0]
        lowercase__ : Optional[Any] = self.get_scheduler_config(variance_type="learned_range" )
        lowercase__ : Any = scheduler_class(**lowercase_ )
        lowercase__ : Tuple = 0.5
        assert scheduler._get_variance(1 , predicted_variance=lowercase_ ) - -10.1_71_27_90 < 1E-5
        assert scheduler._get_variance(4_87 , predicted_variance=lowercase_ ) - -5.7_99_80_52 < 1E-5
        assert scheduler._get_variance(9_99 , predicted_variance=lowercase_ ) - -0.0_01_00_11 < 1E-5
    def __UpperCamelCase ( self : str ) -> List[Any]:
        # Full denoising loop over the training timesteps with a fixed seed.
        lowercase__ : Union[str, Any] = self.scheduler_classes[0]
        lowercase__ : List[Any] = self.get_scheduler_config()
        lowercase__ : Optional[Any] = scheduler_class(**lowercase_ )
        lowercase__ : Union[str, Any] = scheduler.timesteps
        lowercase__ : List[Any] = self.dummy_model()
        lowercase__ : str = self.dummy_sample_deter
        lowercase__ : int = torch.manual_seed(0 )
        for i, t in enumerate(lowercase_ ):
            # 1. predict noise residual
            lowercase__ : Optional[int] = model(lowercase_ , lowercase_ )
            # 2. predict previous mean of sample x_t-1
            lowercase__ : int = scheduler.step(lowercase_ , lowercase_ , lowercase_ , generator=lowercase_ ).prev_sample
            lowercase__ : List[str] = pred_prev_sample
        lowercase__ : List[str] = torch.sum(torch.abs(lowercase_ ) )
        lowercase__ : List[Any] = torch.mean(torch.abs(lowercase_ ) )
        # Regression targets on the final sample statistics.
        assert abs(result_sum.item() - 2_52.2_68_24_95 ) < 1E-2
        assert abs(result_mean.item() - 0.3_28_47_43 ) < 1E-3
    def __UpperCamelCase ( self : Optional[Any] ) -> List[str]:
        # Truncated (25-step) loop, exercising the explicit prev_timestep path.
        lowercase__ : Optional[Any] = self.scheduler_classes[0]
        lowercase__ : Any = self.get_scheduler_config()
        lowercase__ : List[Any] = scheduler_class(**lowercase_ )
        scheduler.set_timesteps(25 )
        lowercase__ : Union[str, Any] = scheduler.timesteps
        lowercase__ : Tuple = self.dummy_model()
        lowercase__ : int = self.dummy_sample_deter
        lowercase__ : Optional[Any] = torch.manual_seed(0 )
        for i, t in enumerate(lowercase_ ):
            # 1. predict noise residual
            lowercase__ : Tuple = model(lowercase_ , lowercase_ )
            if i + 1 == timesteps.shape[0]:
                # Last step: no previous timestep remains.
                lowercase__ : int = None
            else:
                lowercase__ : Union[str, Any] = timesteps[i + 1]
            # 2. predict previous mean of sample x_t-1
            lowercase__ : str = scheduler.step(
                lowercase_ , lowercase_ , lowercase_ , prev_timestep=lowercase_ , generator=lowercase_ ).prev_sample
            lowercase__ : str = pred_prev_sample
        lowercase__ : Dict = torch.sum(torch.abs(lowercase_ ) )
        lowercase__ : List[Any] = torch.mean(torch.abs(lowercase_ ) )
        assert abs(result_sum.item() - 2_58.2_04_49_83 ) < 1E-2
        assert abs(result_mean.item() - 0.3_36_20_38 ) < 1E-3
    def __UpperCamelCase ( self : Any ) -> Tuple:
        # Intentionally skipped common-test hook.
        pass
    def __UpperCamelCase ( self : Optional[int] ) -> Optional[Any]:
        # Intentionally skipped common-test hook.
        pass
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
UpperCamelCase = logging.get_logger(__name__)
if is_vision_available():
import PIL
class snake_case_ ( __A ):
    """CLIP-style image processor: optional RGB conversion, shortest-edge
    resize, center crop, rescale and normalization, returning a
    ``BatchFeature`` of pixel values.

    NOTE(review): obfuscation damage kept byte-identical below — parameter
    names in every ``def`` collapsed to ``lowercase_`` (duplicate argument
    names are a SyntaxError) while bodies read the original names (``size``,
    ``crop_size``, ``do_resize`` ...). Needs de-mangling to run.
    """
    __A : str = ["pixel_values"]
    def __init__( self : int , lowercase_ : bool = True , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = PILImageResampling.BICUBIC , lowercase_ : bool = True , lowercase_ : Dict[str, int] = None , lowercase_ : bool = True , lowercase_ : Union[int, float] = 1 / 2_55 , lowercase_ : bool = True , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : bool = True , **lowercase_ : Union[str, Any] , ) -> None:
        super().__init__(**lowercase_ )
        # Defaults: 224 shortest-edge resize then a 224x224 center crop.
        lowercase__ : Tuple = size if size is not None else {"shortest_edge": 2_24}
        lowercase__ : Tuple = get_size_dict(lowercase_ , default_to_square=lowercase_ )
        lowercase__ : List[str] = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
        lowercase__ : Tuple = get_size_dict(lowercase_ , default_to_square=lowercase_ , param_name="crop_size" )
        lowercase__ : Dict = do_resize
        lowercase__ : List[Any] = size
        lowercase__ : int = resample
        lowercase__ : Union[str, Any] = do_center_crop
        lowercase__ : Optional[int] = crop_size
        lowercase__ : List[str] = do_rescale
        lowercase__ : int = rescale_factor
        lowercase__ : List[Any] = do_normalize
        # Fall back to the OpenAI CLIP normalization statistics.
        lowercase__ : Union[str, Any] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        lowercase__ : str = image_std if image_std is not None else OPENAI_CLIP_STD
        lowercase__ : Dict = do_convert_rgb
    def __UpperCamelCase ( self : Optional[Any] , lowercase_ : np.ndarray , lowercase_ : Dict[str, int] , lowercase_ : PILImageResampling = PILImageResampling.BICUBIC , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Union[str, Any] , ) -> np.ndarray:
        # Resize so the image's shortest edge matches size["shortest_edge"].
        lowercase__ : str = get_size_dict(lowercase_ , default_to_square=lowercase_ )
        if "shortest_edge" not in size:
            raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
        lowercase__ : Dict = get_resize_output_image_size(lowercase_ , size=size["shortest_edge"] , default_to_square=lowercase_ )
        return resize(lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_ )
    def __UpperCamelCase ( self : int , lowercase_ : np.ndarray , lowercase_ : Dict[str, int] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : int , ) -> np.ndarray:
        # Center-crop to the exact (height, width) given in `size`.
        lowercase__ : Optional[Any] = get_size_dict(lowercase_ )
        if "height" not in size or "width" not in size:
            raise ValueError(F'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
        return center_crop(lowercase_ , size=(size["height"], size["width"]) , data_format=lowercase_ , **lowercase_ )
    def __UpperCamelCase ( self : Optional[Any] , lowercase_ : np.ndarray , lowercase_ : Union[int, float] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Optional[Any] , ) -> Any:
        # Multiply pixel values by `scale` (typically 1/255).
        return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_ )
    def __UpperCamelCase ( self : str , lowercase_ : np.ndarray , lowercase_ : Union[float, List[float]] , lowercase_ : Union[float, List[float]] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : str , ) -> np.ndarray:
        # Channel-wise (x - mean) / std normalization.
        return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_ )
    def __UpperCamelCase ( self : Optional[Any] , lowercase_ : ImageInput , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = None , lowercase_ : bool = None , lowercase_ : int = None , lowercase_ : bool = None , lowercase_ : float = None , lowercase_ : bool = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : bool = None , lowercase_ : Optional[Union[str, TensorType]] = None , lowercase_ : Optional[ChannelDimension] = ChannelDimension.FIRST , **lowercase_ : Union[str, Any] , ) -> PIL.Image.Image:
        # Resolve every option against the instance defaults set in __init__.
        lowercase__ : int = do_resize if do_resize is not None else self.do_resize
        lowercase__ : Dict = size if size is not None else self.size
        lowercase__ : List[Any] = get_size_dict(lowercase_ , param_name="size" , default_to_square=lowercase_ )
        lowercase__ : Dict = resample if resample is not None else self.resample
        lowercase__ : int = do_center_crop if do_center_crop is not None else self.do_center_crop
        lowercase__ : Dict = crop_size if crop_size is not None else self.crop_size
        lowercase__ : List[str] = get_size_dict(lowercase_ , param_name="crop_size" , default_to_square=lowercase_ )
        lowercase__ : int = do_rescale if do_rescale is not None else self.do_rescale
        lowercase__ : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor
        lowercase__ : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize
        lowercase__ : int = image_mean if image_mean is not None else self.image_mean
        lowercase__ : List[str] = image_std if image_std is not None else self.image_std
        lowercase__ : Union[str, Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        lowercase__ : Union[str, Any] = make_list_of_images(lowercase_ )
        if not valid_images(lowercase_ ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        # Each enabled step requires its corresponding parameters.
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True." )
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            lowercase__ : Dict = [convert_to_rgb(lowercase_ ) for image in images]
        # All transformations expect numpy arrays.
        lowercase__ : Optional[Any] = [to_numpy_array(lowercase_ ) for image in images]
        if do_resize:
            lowercase__ : List[Any] = [self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_ ) for image in images]
        if do_center_crop:
            lowercase__ : int = [self.center_crop(image=lowercase_ , size=lowercase_ ) for image in images]
        if do_rescale:
            lowercase__ : str = [self.rescale(image=lowercase_ , scale=lowercase_ ) for image in images]
        if do_normalize:
            lowercase__ : Optional[int] = [self.normalize(image=lowercase_ , mean=lowercase_ , std=lowercase_ ) for image in images]
        lowercase__ : Optional[Any] = [to_channel_dimension_format(lowercase_ , lowercase_ ) for image in images]
        lowercase__ : List[str] = {"pixel_values": images}
        return BatchFeature(data=lowercase_ , tensor_type=lowercase_ )
# --- concatenation artifact ("| 87 | 1 |"): boundary between source files ---
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Script: initialize a fresh causal LM from a base config plus tokenizer vocab,
# then save it (optionally pushing to the Hub).
# NOTE(review): obfuscation collapsed the distinct globals (parser, args,
# tokenizer, config_kwargs, config, model) into the single name
# ``UpperCamelCase`` — later lines read ``parser`` / ``args`` / ``tokenizer``
# / ``config_kwargs`` / ``config`` / ``model``, which are no longer bound.
# Code kept byte-identical; only comments were added.
# Configuration
UpperCamelCase = HfArgumentParser(InitializationArguments)
UpperCamelCase = parser.parse_args()
# Load codeparrot tokenizer trained for Python code tokenization
UpperCamelCase = AutoTokenizer.from_pretrained(args.tokenizer_name)
# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
UpperCamelCase = {
    '''vocab_size''': len(tokenizer),
    '''scale_attn_by_inverse_layer_idx''': True,
    '''reorder_and_upcast_attn''': True,
}
# Load model config (GPT-2 large in this case)
UpperCamelCase = AutoConfig.from_pretrained(args.config_name, **config_kwargs)
# Initialize new model with config
UpperCamelCase = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
UpperCamelCase = {}
# Lazy-module boilerplate: only register the sentencepiece-backed tokenizer
# when the optional dependency is installed.
# NOTE(review): obfuscation collapsed ``_import_structure`` into
# ``UpperCamelCase`` (and mangled the submodule/class to
# ``tokenization_gpt_swa`` / ``GPTSwaTokenizer``), so ``_import_structure``
# on the last line is undefined. Code kept byte-identical; comments only.
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase = ['''GPTSw3Tokenizer''']
if TYPE_CHECKING:
    # Static type checkers see the real import; runtime stays lazy.
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_swa import GPTSwaTokenizer
else:
    import sys
    # Replace this module with a lazy proxy that imports on first access.
    UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
# --- concatenation artifact ("| 87 | 1 |"): boundary between source files ---
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class snake_case_ ( __A ):
    """Config tester: a constructed MobileViTV2 config must expose
    ``width_multiplier``.

    NOTE(review): the assertion references ``lowercase_``, a name mangled by
    obfuscation — presumably the constructed ``config`` local. Kept
    byte-identical; comments only.
    """
    def __UpperCamelCase ( self : Optional[Any] ) -> List[str]:
        lowercase__ : str = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(lowercase_ , "width_multiplier" ) )
class snake_case_ :
def __init__( self : Optional[int] , lowercase_ : Union[str, Any] , lowercase_ : List[str]=13 , lowercase_ : List[str]=64 , lowercase_ : Any=2 , lowercase_ : Dict=3 , lowercase_ : Tuple="swish" , lowercase_ : List[Any]=3 , lowercase_ : List[Any]=32 , lowercase_ : str=0.1 , lowercase_ : Any=0.02 , lowercase_ : Optional[int]=True , lowercase_ : List[str]=True , lowercase_ : Optional[Any]=10 , lowercase_ : List[str]=None , lowercase_ : int=0.25 , lowercase_ : List[str]=0.0 , lowercase_ : str=0.0 , ) -> Dict:
lowercase__ : Optional[int] = parent
lowercase__ : Optional[Any] = batch_size
lowercase__ : str = image_size
lowercase__ : Union[str, Any] = patch_size
lowercase__ : Tuple = num_channels
lowercase__ : Optional[int] = make_divisible(5_12 * width_multiplier , divisor=8 )
lowercase__ : Optional[Any] = hidden_act
lowercase__ : Union[str, Any] = conv_kernel_size
lowercase__ : Tuple = output_stride
lowercase__ : Dict = classifier_dropout_prob
lowercase__ : Optional[int] = use_labels
lowercase__ : Dict = is_training
lowercase__ : Optional[int] = num_labels
lowercase__ : Tuple = initializer_range
lowercase__ : List[Any] = scope
lowercase__ : List[Any] = width_multiplier
lowercase__ : Optional[Any] = ffn_dropout
lowercase__ : List[Any] = attn_dropout
def __UpperCamelCase ( self : str ) -> str:
lowercase__ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ : Union[str, Any] = None
lowercase__ : List[Any] = None
if self.use_labels:
lowercase__ : int = ids_tensor([self.batch_size] , self.num_labels )
lowercase__ : List[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
lowercase__ : Dict = self.get_config()
return config, pixel_values, labels, pixel_labels
def __UpperCamelCase ( self : Any ) -> List[str]:
return MobileViTVaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , )
def __UpperCamelCase ( self : List[Any] , lowercase_ : str , lowercase_ : List[Any] , lowercase_ : List[Any] , lowercase_ : Dict ) -> Union[str, Any]:
lowercase__ : Any = MobileViTVaModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase__ : List[str] = model(lowercase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
    """Check logits shape of the image-classification head.

    Restored from obfuscation (duplicate `lowercase_` parameters and lost
    `config.num_labels` / `model` / `result` bindings).
    """
    config.num_labels = self.num_labels
    model = MobileViTVaForImageClassification(config)
    # NOTE(review): `torch_device` expected from transformers.testing_utils.
    model.to(torch_device)
    model.eval()
    result = model(pixel_values, labels=labels)
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
    """Check logits shape of the segmentation head, with and without labels.

    Restored from obfuscation (duplicate `lowercase_` parameters and lost
    `config.num_labels` / `model` / `result` bindings).
    """
    config.num_labels = self.num_labels
    model = MobileViTVaForSemanticSegmentation(config)
    # NOTE(review): `torch_device` expected from transformers.testing_utils.
    model.to(torch_device)
    model.eval()
    # Forward without labels: logits only.
    result = model(pixel_values)
    self.parent.assertEqual(
        result.logits.shape,
        (
            self.batch_size,
            self.num_labels,
            self.image_size // self.output_stride,
            self.image_size // self.output_stride,
        ),
    )
    # Forward with dense labels: same logits shape, loss also computed.
    result = model(pixel_values, labels=pixel_labels)
    self.parent.assertEqual(
        result.logits.shape,
        (
            self.batch_size,
            self.num_labels,
            self.image_size // self.output_stride,
            self.image_size // self.output_stride,
        ),
    )
def prepare_config_and_inputs_for_common(self):
    """Adapt the tester outputs to the common-test (config, inputs_dict) shape.

    Renamed from the obfuscated `__UpperCamelCase`: the common tests call
    `self.model_tester.prepare_config_and_inputs_for_common()`.
    """
    config_and_inputs = self.prepare_config_and_inputs()
    config, pixel_values, labels, pixel_labels = config_and_inputs
    inputs_dict = {"pixel_values": pixel_values}
    return config, inputs_dict
@require_torch
class snake_case_ ( __A ,__A ,unittest.TestCase ):
    # Common-test harness for MobileViTV2 models.
    # NOTE(review): obfuscation renamed every test method to `__UpperCamelCase`,
    # so each later def shadows the previous one on the class, and several
    # bodies reference the undefined name `lowercase_`; the original distinct
    # test_* names and arguments need restoring upstream.

    # Model classes run through the shared model tests (empty without torch).
    __A : Optional[int] = (
        (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    # Pipeline-task -> model-class mapping used by the pipeline tests.
    __A : Tuple = (
        {
            "feature-extraction": MobileViTVaModel,
            "image-classification": MobileViTVaForImageClassification,
            "image-segmentation": MobileViTVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    # Boolean feature flags for the common test-suite (all disabled).
    __A : List[Any] = False
    __A : List[str] = False
    __A : List[str] = False
    __A : List[Any] = False

    def __UpperCamelCase ( self : str ) -> List[Any]:
        # setUp: create the model tester and the config tester.
        lowercase__ : int = MobileViTVaModelTester(self )
        # NOTE(review): `lowercase_` is undefined here — originally the config
        # class and `has_text_modality=False` were passed; confirm upstream.
        lowercase__ : Optional[int] = MobileViTVaConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ )

    def __UpperCamelCase ( self : Any ) -> Union[str, Any]:
        # Runs the shared config sanity checks.
        # NOTE(review): `self.config_tester` is never assigned above (the
        # obfuscated setUp binds a local instead) — verify.
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViTV2 does not use inputs_embeds" )
    def __UpperCamelCase ( self : str ) -> List[Any]:
        pass

    @unittest.skip(reason="MobileViTV2 does not support input and output embeddings" )
    def __UpperCamelCase ( self : Any ) -> Optional[int]:
        pass

    @unittest.skip(reason="MobileViTV2 does not output attentions" )
    def __UpperCamelCase ( self : str ) -> Optional[int]:
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="Got `CUDA error: misaligned address` for tests after this one being run." )
    def __UpperCamelCase ( self : Any ) -> int:
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
    def __UpperCamelCase ( self : str ) -> Optional[int]:
        pass

    def __UpperCamelCase ( self : Optional[int] ) -> Union[str, Any]:
        # Checks each model's forward() signature starts with `pixel_values`.
        lowercase__ , lowercase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NOTE(review): `lowercase_`, `model`, `signature` and `arg_names`
            # bindings were mangled — originally the config was passed in and
            # `inspect.signature(model.forward)` inspected.
            lowercase__ : Union[str, Any] = model_class(lowercase_ )
            lowercase__ : List[Any] = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowercase__ : List[str] = [*signature.parameters.keys()]
            lowercase__ : List[str] = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , lowercase_ )

    def __UpperCamelCase ( self : Union[str, Any] ) -> Any:
        # Exercises the bare model through the tester helper.
        lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowercase_ )

    def __UpperCamelCase ( self : Union[str, Any] ) -> Dict:
        # Verifies the number and spatial sizes of the hidden-state maps.
        # NOTE(review): the inner helper declares three parameters all named
        # `lowercase_`, which is a SyntaxError — the original parameters
        # (inputs_dict, config, model_class) need restoring.
        def check_hidden_states_output(lowercase_ : int , lowercase_ : Dict , lowercase_ : Dict ):
            lowercase__ : Any = model_class(lowercase_ )
            model.to(lowercase_ )
            model.eval()
            with torch.no_grad():
                lowercase__ : Optional[Any] = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
            lowercase__ : Optional[int] = outputs.hidden_states
            # MobileViTV2 emits 5 feature maps.
            lowercase__ : Any = 5
            self.assertEqual(len(lowercase_ ) , lowercase_ )
            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            lowercase__ : Optional[int] = 2
            for i in range(len(lowercase_ ) ):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
                divisor *= 2
            self.assertEqual(self.model_tester.output_stride , divisor // 2 )

        lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowercase__ : Tuple = True
            check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowercase__ : Optional[int] = True
            check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )

    def __UpperCamelCase ( self : List[Any] ) -> str:
        # Image-classification head test via the tester helper.
        lowercase__ : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*lowercase_ )

    def __UpperCamelCase ( self : Any ) -> int:
        # Semantic-segmentation head test via the tester helper.
        lowercase__ : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*lowercase_ )

    @slow
    def __UpperCamelCase ( self : Union[str, Any] ) -> List[str]:
        # Smoke-test loading the first published checkpoint.
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowercase__ : Tuple = MobileViTVaModel.from_pretrained(lowercase_ )
            self.assertIsNotNone(lowercase_ )
def prepare_img():
    """Load the standard COCO fixture image used by the integration tests.

    Renamed from the obfuscated `lowercase_`: the tests below call
    `prepare_img()`, which the mangled name left undefined.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class snake_case_ ( unittest.TestCase ):
    # Slow integration tests running real MobileViTV2 checkpoints end to end.
    # NOTE(review): obfuscation collapsed locals to `lowercase__` and call
    # arguments to the undefined `lowercase_` (originally the device, image,
    # inputs and expected tensors were threaded through) — restore upstream.

    @cached_property
    def __UpperCamelCase ( self : List[Any] ) -> Optional[Any]:
        # Image processor for the 1.0-width ImageNet checkpoint (None w/o vision).
        return (
            MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256" )
            if is_vision_available()
            else None
        )

    @slow
    def __UpperCamelCase ( self : Union[str, Any] ) -> List[str]:
        # Classification: checks logits shape and the first three logit values.
        lowercase__ : List[str] = MobileViTVaForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256" ).to(
            lowercase_ )
        lowercase__ : int = self.default_image_processor
        lowercase__ : int = prepare_img()
        lowercase__ : Optional[int] = image_processor(images=lowercase_ , return_tensors="pt" ).to(lowercase_ )
        # forward pass
        with torch.no_grad():
            lowercase__ : str = model(**lowercase_ )
        # verify the logits
        lowercase__ : Optional[int] = torch.Size((1, 10_00) )
        self.assertEqual(outputs.logits.shape , lowercase_ )
        lowercase__ : Tuple = torch.tensor([-1.63_36E00, -7.32_04E-02, -5.18_83E-01] ).to(lowercase_ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1E-4 ) )

    @slow
    def __UpperCamelCase ( self : Tuple ) -> Optional[int]:
        # Segmentation: checks logits shape and a 3x3x3 slice of the logits.
        lowercase__ : Dict = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
        lowercase__ : int = model.to(lowercase_ )
        lowercase__ : Any = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
        lowercase__ : List[str] = prepare_img()
        lowercase__ : int = image_processor(images=lowercase_ , return_tensors="pt" ).to(lowercase_ )
        # forward pass
        with torch.no_grad():
            lowercase__ : str = model(**lowercase_ )
        lowercase__ : Tuple = outputs.logits
        # verify the logits
        lowercase__ : Union[str, Any] = torch.Size((1, 21, 32, 32) )
        self.assertEqual(logits.shape , lowercase_ )
        lowercase__ : Optional[Any] = torch.tensor(
            [
                [[7.08_63, 7.15_25, 6.82_01], [6.69_31, 6.87_70, 6.89_33], [6.29_78, 7.03_66, 6.96_36]],
                [[-3.71_34, -3.67_12, -3.66_75], [-3.58_25, -3.35_49, -3.47_77], [-3.34_35, -3.39_79, -3.28_57]],
                [[-2.93_29, -2.80_03, -2.73_69], [-3.05_64, -2.47_80, -2.02_07], [-2.68_89, -1.92_98, -1.76_40]],
            ] , device=lowercase_ , )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , lowercase_ , atol=1E-4 ) )

    @slow
    def __UpperCamelCase ( self : Any ) -> List[str]:
        # Post-processing: resizes predicted maps to target_sizes / model size.
        lowercase__ : Dict = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
        lowercase__ : int = model.to(lowercase_ )
        lowercase__ : Optional[int] = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
        lowercase__ : List[str] = prepare_img()
        lowercase__ : Tuple = image_processor(images=lowercase_ , return_tensors="pt" ).to(lowercase_ )
        # forward pass
        with torch.no_grad():
            lowercase__ : Optional[Any] = model(**lowercase_ )
        lowercase__ : Tuple = outputs.logits.detach().cpu()
        lowercase__ : List[str] = image_processor.post_process_semantic_segmentation(outputs=lowercase_ , target_sizes=[(50, 60)] )
        lowercase__ : List[str] = torch.Size((50, 60) )
        self.assertEqual(segmentation[0].shape , lowercase_ )
        lowercase__ : int = image_processor.post_process_semantic_segmentation(outputs=lowercase_ )
        lowercase__ : Dict = torch.Size((32, 32) )
        self.assertEqual(segmentation[0].shape , lowercase_ )
"""Project Euler 145: count "reversible" numbers below 10**max_power.

n is reversible when n has no leading zero and every digit of n + reverse(n)
is odd. Digits are chosen pairwise with opposite parities so each column sum
stays odd, recursing over the remaining (inner) positions.
"""
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(remaining_length: int, remainder: int, digits: list[int], length: int) -> int:
    """Count reversible numbers of `length` digits.

    `remaining_length` digit positions are still unassigned and `remainder`
    is the running carry of the column sums placed so far.

    Restored from obfuscation: all four parameters shared one name (a
    duplicate-argument SyntaxError) and the `digits[...]` index assignments
    had collapsed into plain local rebindings.
    """
    if remaining_length == 0:
        # All digits placed: reject leading/trailing zero, then verify every
        # inner column sum of n + reverse(n) is odd.
        if digits[0] == 0 or digits[-1] == 0:
            return 0
        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10
        return 1

    if remaining_length == 1:
        # Odd length: the middle digit pairs with itself (contributes 2*digit),
        # so the incoming carry must already be odd.
        if remainder % 2 == 0:
            return 0
        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(0, (remainder + 2 * digit) // 10, digits, length)
        return result

    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1
        # The partner digit must have opposite parity so the column sum is odd.
        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS
        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2,
                (remainder + digit1 + digit2) // 10,
                digits,
                length,
            )
    return result


def solution(max_power: int = 9) -> int:
    """Return how many reversible numbers exist below 10**max_power."""
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
| 87 | 1 |
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
# Matches English articles as whole words; used by normalize_answer().
# Restored name: both module globals had been collapsed onto `UpperCamelCase`,
# while the functions below reference ARTICLES_REGEX and OPTS.
ARTICLES_REGEX = re.compile(R'''\b(a|an|the)\b''', re.UNICODE)
# Parsed command-line options; populated in the __main__ block.
OPTS = None
def parse_args():
    """Parse command-line options for the SQuAD 2.0 evaluation script.

    Renamed from the obfuscated `lowercase_` (the __main__ block calls
    `parse_args()`); the mangled `type=`/`default=` literals are restored to
    `float` / `None`. Prints help and exits when invoked without arguments.
    """
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout)."
    )
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer."
    )
    parser.add_argument(
        "--na-prob-thresh",
        "-t",
        type=float,
        default=1.0,
        help="Predict \"\" if no-answer probability exceeds this (default = 1.0).",
    )
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory."
    )
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
def make_qid_to_has_ans(dataset):
    """Map each question id to whether it has at least one gold answer.

    Restored from obfuscation: the per-qid dict assignment had collapsed into
    a plain local rebinding, and the def name no longer matched the
    `make_qid_to_has_ans(...)` call in main().
    """
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace.

    Renamed from the obfuscated `lowercase_`: get_tokens / compute_exact /
    get_raw_scores all call `normalize_answer(...)`.
    """

    def remove_articles(text):
        # Uses the module-level ARTICLES_REGEX (a|an|the as whole words).
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    # Order matters: punctuation must go before article matching.
    return white_space_fix(remove_articles(remove_punc(lower(s))))
def get_tokens(s):
    """Tokenize by whitespace after normalization; empty input yields [].

    Renamed from the obfuscated `lowercase_` to match its call sites in
    compute_fa.
    """
    if not s:
        return []
    return normalize_answer(s).split()
def compute_exact(a_gold, a_pred):
    """Return 1 if the normalized answers match exactly, else 0.

    Restored from obfuscation: both parameters were named `_lowerCamelCase`
    (a duplicate-argument SyntaxError).
    """
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_fa(a_gold, a_pred):
    """Token-level F1 between a gold answer and a prediction.

    Restored from obfuscation (duplicate parameter names and lost
    `gold_toks`/`pred_toks`/`common` bindings). Returns 1/0 for the
    all-empty cases, otherwise the harmonic mean of precision and recall.
    """
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    fa = (2 * precision * recall) / (precision + recall)
    return fa
def get_raw_scores(dataset, preds):
    """Compute raw (un-thresholded) exact-match and F1 scores per question.

    Returns (exact_scores, fa_scores): dicts mapping qid -> best score over
    all gold answers. Restored from obfuscation: the per-qid dict
    assignments and the compute_exact/compute_fa arguments had been lost.
    """
    exact_scores = {}
    fa_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f'''Missing prediction for {qid}''')
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                fa_scores[qid] = max(compute_fa(a, a_pred) for a in gold_answers)
    return exact_scores, fa_scores
def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    """Re-score questions whose no-answer probability exceeds the threshold.

    When na_prob > threshold the prediction is treated as "no answer": the
    score becomes 1.0 for genuinely unanswerable questions, 0.0 otherwise.
    Restored from obfuscation (lost `new_scores[qid]` assignments).
    """
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores
def make_eval_dict(exact_scores, fa_scores, qid_list=None):
    """Aggregate per-question scores into an OrderedDict of percentages.

    When `qid_list` is given, only those question ids contribute.
    Renamed from the obfuscated `lowercase_` to match its call sites in
    main().
    """
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(fa_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(fa_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )
def merge_eval(main_eval, new_eval, prefix):
    """Copy every entry of `new_eval` into `main_eval` under `prefix`_key.

    Restored from obfuscation: the prefixed-key assignment had collapsed
    into a plain local rebinding, making the function a no-op.
    """
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]
def plot_pr_curve(precisions, recalls, out_image, title):
    """Render a precision-recall step curve to `out_image` and clear the figure.

    Relies on the module-global `plt`, which the __main__ block imports only
    when --out-image-dir is given. Restored from obfuscation: all four
    parameters shared one name (a duplicate-argument SyntaxError).
    """
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()
def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    """Sweep the no-answer threshold and compute average precision ("ap").

    Questions are visited in increasing no-answer probability; a PR point is
    emitted each time the probability changes. Restored from obfuscation
    (lost `cur_p`/`cur_r`/`avg_prec` bindings and plot arguments).
    """
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}
def run_precision_recall_analysis(main_eval, exact_raw, fa_raw, na_probs, qid_to_has_ans, out_image_dir):
    """Produce PR curves (exact, F1, oracle) and merge their AP into main_eval.

    No-op when there are no answerable questions. Restored from obfuscation
    (lost `pr_exact`/`pr_f1`/`oracle_scores`/`pr_oracle` bindings and merge
    arguments).
    """
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_exact.png"),
        title="Precision-Recall curve for Exact Match score",
    )
    pr_fa = make_precision_recall_eval(
        fa_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_f1.png"),
        title="Precision-Recall curve for F1 score",
    )
    # Oracle: score 1 exactly on answerable questions.
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_oracle.png"),
        title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)",
    )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_fa, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")
def histogram_na_prob(na_probs, qid_list, image_dir, name):
    """Save a normalized histogram of no-answer probabilities for `qid_list`.

    Relies on the module-global `plt` imported in the __main__ block.
    Restored from obfuscation (lost `x`/`weights` bindings and plot args).
    """
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    # Normalize so the bars sum to 1 (proportion of the subset).
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f'''Histogram of no-answer probability: {name}''')
    plt.savefig(os.path.join(image_dir, f'''na_prob_hist_{name}.png'''))
    plt.clf()
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    """Find the no-answer threshold maximizing the aggregate score.

    Starts from "predict no-answer for everything" (score = number of
    unanswerable questions) and sweeps qids by increasing no-answer
    probability. Restored from obfuscation: the `cur_score`/`best_score`/
    `best_thresh`/`diff` bindings had all collapsed into one local.

    Returns:
        (best score as a percentage of len(scores), best threshold).
    """
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            # Penalize answering an unanswerable question; empty answers are neutral.
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh
def find_all_best_thresh(main_eval, preds, exact_raw, fa_raw, na_probs, qid_to_has_ans):
    """Record the best exact/F1 thresholds and scores in `main_eval`.

    Restored from obfuscation: the four `main_eval[...]` assignments had
    collapsed into plain local rebindings, making the function a no-op.
    """
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_fa, fa_thresh = find_best_thresh(preds, fa_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_fa
    main_eval["best_f1_thresh"] = fa_thresh
def main():
    """Run the SQuAD 2.0 evaluation end to end using the global OPTS.

    Loads the dataset/prediction (and optional no-answer probability) JSON
    files, scores them, merges HasAns/NoAns breakdowns and threshold sweeps,
    then writes the result to OPTS.out_file or prints it. Restored from
    obfuscation (lost `dataset`/`preds`/`na_probs`/`out_eval` bindings).
    """
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, fa_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    fa_thresh = apply_no_ans_threshold(fa_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, fa_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, fa_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, fa_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, fa_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, fa_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))
if __name__ == "__main__":
    # Bind the parsed options to the module-global OPTS that every helper
    # reads (the obfuscated `UpperCamelCase = parse_args()` left OPTS unset).
    OPTS = parse_args()
    if OPTS.out_image_dir:
        # Import matplotlib lazily and headlessly, only when plots are requested.
        import matplotlib

        matplotlib.use('''Agg''')
        import matplotlib.pyplot as plt
    main()
| 87 | import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
UpperCamelCase = '''\
@inproceedings{snover-etal-2006-study,
title = "A Study of Translation Edit Rate with Targeted Human Annotation",
author = "Snover, Matthew and
Dorr, Bonnie and
Schwartz, Rich and
Micciulla, Linnea and
Makhoul, John",
booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",
month = aug # " 8-12",
year = "2006",
address = "Cambridge, Massachusetts, USA",
publisher = "Association for Machine Translation in the Americas",
url = "https://aclanthology.org/2006.amta-papers.25",
pages = "223--231",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
UpperCamelCase = '''\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
'''
UpperCamelCase = '''
Produces TER scores alongside the number of edits and reference length.
Args:
predictions (list of str): The system stream (a sequence of segments).
references (list of list of str): A list of one or more reference streams (each a sequence of segments).
normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
Only applies if `normalized = True`. Defaults to `False`.
case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
\'score\' (float): TER score (num_edits / sum_ref_lengths * 100)
\'num_edits\' (int): The cumulative number of edits
\'ref_length\' (float): The cumulative average reference length
Examples:
Example 1:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}
Example 2:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}
Example 3:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... normalized=True,
... case_sensitive=True)
>>> print(results)
{\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}
Example 4:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}
Example 5:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class snake_case_ ( datasets.Metric ):
    """TER (Translation Edit Rate) metric backed by sacrebleu.

    Method names restored from the obfuscated `__UpperCamelCase`:
    `datasets.Metric` dispatches to `_info` and `_compute` by name.
    """

    def _info(self):
        # Guard against sacrebleu versions predating the TER API.
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                "You can install it with `pip install \"sacrebleu>=1.4.12\"`."
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="http://www.cs.umd.edu/~snover/tercom/",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"],
            reference_urls=[
                "https://github.com/jhclark/tercom",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        normalized: bool = False,
        ignore_punct: bool = False,
        support_zh_ja_chars: bool = False,
        case_sensitive: bool = False,
    ):
        """Score `predictions` against transposed `references` with sacrebleu TER."""
        # Every prediction must come with the same number of references so the
        # list can be transposed into sacrebleu's [n_refs][n_preds] layout.
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)
        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 87 | 1 |
import json
import logging
import os
import socket
import git
import numpy as np
import torch
# Configure root logging once at import time: timestamped, level-tagged records
# including the emitting process id (useful for multi-process training runs).
logging.basicConfig(
    format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''',
    datefmt='''%m/%d/%Y %H:%M:%S''',
    level=logging.INFO,
)
# Module-level logger. Restored name: the helpers below reference `logger`,
# but the obfuscated code bound it to `UpperCamelCase`.
logger = logging.getLogger(__name__)
def git_log(folder_path: str):
    """Dump the current git commit/branch info to `<folder_path>/git_log.json`.

    Restored from obfuscation: the `repo`/`repo_infos` bindings were lost and
    `search_parent_directories` had lost its `True` literal. Renamed from
    `lowercase_` (three top-level defs shared that name and shadowed each
    other).
    """
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
    }
    with open(os.path.join(folder_path, "git_log.json"), "w") as f:
        json.dump(repo_infos, f, indent=4)
def init_gpu_params(params):
    """Initialize single-/multi-GPU and multi-node settings on `params`.

    Mutates `params` in place (local_rank, global_rank, world_size, n_nodes,
    node_id, n_gpu_per_node, is_master, multi_gpu, multi_node), then selects
    the CUDA device and, for multi-GPU jobs, initializes torch.distributed.
    Restored from obfuscation: every `params.*` attribute assignment had
    collapsed into a plain local rebinding.
    """
    if params.n_gpu <= 0:
        # CPU-only: single master process, no distributed setup.
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return

    assert torch.cuda.is_available()

    logger.info("Initializing GPUs")
    if params.n_gpu > 1:
        assert params.local_rank != -1

        # Topology comes from the torch.distributed launcher environment.
        params.world_size = int(os.environ["WORLD_SIZE"])
        params.n_gpu_per_node = int(os.environ["N_GPU_NODE"])
        params.global_rank = int(os.environ["RANK"])

        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True

        assert params.n_nodes == int(os.environ["N_NODES"])
        assert params.node_id == int(os.environ["NODE_RANK"])

    # local job (single GPU)
    else:
        assert params.local_rank == -1

        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False

    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node

    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1

    # summary
    PREFIX = f'''--- Global rank: {params.global_rank} - '''
    logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes)
    logger.info(PREFIX + "Node ID : %i" % params.node_id)
    logger.info(PREFIX + "Local rank : %i" % params.local_rank)
    logger.info(PREFIX + "World size : %i" % params.world_size)
    logger.info(PREFIX + "GPUs per node : %i" % params.n_gpu_per_node)
    logger.info(PREFIX + "Master : %s" % str(params.is_master))
    logger.info(PREFIX + "Multi-node : %s" % str(params.multi_node))
    logger.info(PREFIX + "Multi-GPU : %s" % str(params.multi_gpu))
    logger.info(PREFIX + "Hostname : %s" % socket.gethostname())

    # set GPU device
    torch.cuda.set_device(params.local_rank)

    # initialize multi-GPU
    if params.multi_gpu:
        logger.info("Initializing PyTorch distributed")
        torch.distributed.init_process_group(
            init_method="env://",
            backend="nccl",
        )
def lowercase_(_lowerCamelCase: Any):
    """Seed the numpy and torch RNGs for reproducibility.

    Expects an argparse-style namespace with ``seed`` (int) and ``n_gpu`` (int).

    BUG FIX: the body read a global ``args`` that does not exist in this
    module; it now reads the parameter that is actually passed in.
    """
    np.random.seed(_lowerCamelCase.seed)
    torch.manual_seed(_lowerCamelCase.seed)
    if _lowerCamelCase.n_gpu > 0:
        # also seed every visible CUDA device
        torch.cuda.manual_seed_all(_lowerCamelCase.seed)
def lowercase_(_lowerCamelCase: int) -> bool:
    """Return True iff ``_lowerCamelCase`` is a perfect cube of a non-negative integer.

    BUG FIX: the previous body read an undefined name ``n``, and compared
    ``(n ** (1/3)) ** 3`` with raw float arithmetic, which misreports exact
    cubes (e.g. 27, whose float cube root is 3.0000000000000004).  The float
    root is now rounded to the nearest integer before cubing.
    """
    if _lowerCamelCase < 0:
        # negatives were never reported as cubes by the original float test
        return False
    root = round(_lowerCamelCase ** (1 / 3))
    return root ** 3 == _lowerCamelCase


if __name__ == "__main__":
    # BUG FIX: the guard called an undefined name `perfect_cube`; call the
    # function actually defined above.
    print(lowercase_(27))  # True
    print(lowercase_(4))   # False
| 87 | 1 |
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
UpperCamelCase = version.parse(version.parse(torch.__version__).base_version) < version.parse('''1.11''')
def lowercase_(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    """Export ``model`` to an ONNX file at ``output_path`` via ``torch.onnx.export``.

    BUG FIX: every parameter was named ``_lowerCamelCase`` (a SyntaxError in
    Python) and ``mkdir`` received arbitrary objects as its flags.  Parameter
    names are restored from the keyword arguments used at the call site below
    (`model_args=`, `output_path=`, `ordered_input_names=`, ...).
    NOTE(review): `do_constant_folding`/`enable_onnx_checker` are assumed to be
    True as in the upstream diffusers script — confirm.
    """
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format`
    # arguments in v1.11, so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )
@torch.no_grad()
def lowercase_(model_path: str, output_path: str, opset: int, fpaa: bool = False):
    """Export the VAE decoder of a diffusers checkpoint at ``model_path`` to ONNX.

    BUG FIX: all four parameters shared one obfuscated name (a SyntaxError) and
    both branches of the dtype selection read ``torch.floataa``; parameters are
    restored from the attribute reads in the body (`model_path`, `fpaa`, `opset`)
    and the positional call in the __main__ guard.
    """
    dtype = torch.float16 if fpaa else torch.float32
    if fpaa and torch.cuda.is_available():
        device = "cuda"
    elif fpaa and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    output_path = Path(output_path)

    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae")
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    # NOTE(review): rebinding `forward` to `decode` follows the upstream
    # diffusers conversion script — confirm.
    vae_decoder.forward = vae_decoder.decode
    # NOTE(review): `onnx_export` must match the exporter defined above (it
    # appears under an obfuscated name in this file) — confirm.
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del vae_decoder
if __name__ == "__main__":
    # BUG FIX: the parser and parsed args were bound to `UpperCamelCase` while
    # every following line reads `parser`/`args`; bind the names actually used.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_path",
        type=str,
        required=True,
        help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
    )
    parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--opset",
        default=14,
        type=int,
        help="The version of the ONNX operator set to use.",
    )
    parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
    args = parser.parse_args()

    print(args.output_path)
    # BUG FIX: argparse stores `--fp16` under `args.fp16`, not `args.fpaa`.
    # NOTE(review): `convert_models` must match the converter defined above
    # (obfuscated in this file) — confirm.
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
    print("SD: Done: ONNX")
| 87 | from PIL import Image
def lowercase_(img, level: int):
    """Return a copy of ``img`` with its contrast adjusted by ``level``.

    Applies the standard contrast-correction factor
    259*(level+255) / (255*(259-level)) and remaps every channel value
    around the midpoint 128 via ``Image.point``.

    BUG FIX: both parameters shared one obfuscated name (a SyntaxError) and
    the body read undefined names `level`, `factor` and `c`.
    """
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        # remap one channel value around the 128 midpoint
        return int(128 + factor * (c - 128))

    return img.point(contrast)
if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change contrast to 170
        # BUG FIX: the result was bound to `UpperCamelCase` while the save call
        # below reads `cont_img`; bind the name that is actually used.
        # NOTE(review): `change_contrast` must match the contrast helper defined
        # above (obfuscated in this file) — confirm.
        cont_img = change_contrast(img, 170)
    cont_img.save("image_data/lena_high_contrast.png", format="png")
| 87 | 1 |
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_sentencepiece_available():
import sentencepiece as sp
UpperCamelCase = 5
UpperCamelCase = 10
@require_sentencepiece
@require_tokenizers
class snake_case_ ( __A ,unittest.TestCase ):
__A : Dict = SpeechaTextTokenizer
__A : Optional[int] = False
__A : Dict = True
def __UpperCamelCase ( self : Optional[int] ) -> Dict:
super().setUp()
lowercase__ : Optional[Any] = sp.SentencePieceProcessor()
spm_model.Load(lowercase_ )
lowercase__ : int = ["<s>", "<pad>", "</s>", "<unk>"]
vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(lowercase_ ) )]
lowercase__ : Optional[Any] = dict(zip(lowercase_ , range(len(lowercase_ ) ) ) )
lowercase__ : Any = Path(self.tmpdirname )
save_json(lowercase_ , save_dir / VOCAB_FILES_NAMES["vocab_file"] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(lowercase_ , save_dir / VOCAB_FILES_NAMES["spm_file"] )
lowercase__ : str = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def __UpperCamelCase ( self : List[Any] ) -> Union[str, Any]:
lowercase__ : Any = "<pad>"
lowercase__ : int = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase_ ) , lowercase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase_ ) , lowercase_ )
def __UpperCamelCase ( self : Union[str, Any] ) -> List[Any]:
lowercase__ : Tuple = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-1] , "j" )
self.assertEqual(len(lowercase_ ) , 10_01 )
def __UpperCamelCase ( self : Optional[Any] ) -> List[str]:
self.assertEqual(self.get_tokenizer().vocab_size , 10_01 )
def __UpperCamelCase ( self : Dict ) -> List[Any]:
lowercase__ : Dict = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
lowercase__ : Tuple = tokenizer.tokenize("This is a test" )
self.assertListEqual(lowercase_ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowercase_ ) , [2_89, 50, 14, 1_74, 3_86] , )
lowercase__ : Any = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
lowercase_ , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."] , )
lowercase__ : int = tokenizer.convert_tokens_to_ids(lowercase_ )
self.assertListEqual(lowercase_ , [12, 25, 88, 59, 28, 23, 11, 4, 6_06, 3_51, 3_51, 3_51, 7, 16, 70, 50, 76, 84, 10, 4, 8] )
lowercase__ : str = tokenizer.convert_ids_to_tokens(lowercase_ )
self.assertListEqual(
lowercase_ , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."] , )
@slow
def __UpperCamelCase ( self : Tuple ) -> Optional[Any]:
# fmt: off
lowercase__ : List[Any] = {"input_ids": [[37_91, 7_97, 31, 11, 64, 7_97, 31, 24_29, 4_33, 12, 11_76, 12, 20, 7_86, 9_15, 1_42, 24_13, 2_40, 37, 32_38, 7_97, 31, 11, 35, 93, 9_15, 1_42, 24_13, 2_40, 37, 55_40, 5_67, 12_76, 93, 37, 6_10, 40, 62, 4_55, 6_57, 10_42, 1_23, 7_80, 1_77, 37, 3_09, 2_41, 12_98, 5_14, 20, 2_92, 27_37, 1_14, 24_69, 2_41, 85, 64, 3_02, 5_48, 5_28, 4_23, 4, 5_09, 4_06, 4_23, 37, 6_01, 4, 7_77, 3_02, 5_48, 5_28, 4_23, 2_84, 4, 33_88, 5_11, 4_59, 4, 35_55, 40, 3_21, 3_02, 7_05, 4, 33_88, 5_11, 5_83, 3_26, 5, 5, 5, 62, 33_10, 5_60, 1_77, 26_80, 2_17, 15_08, 32, 31, 8_53, 4_18, 64, 5_83, 5_11, 16_05, 62, 35, 93, 5_60, 1_77, 26_80, 2_17, 15_08, 15_21, 64, 5_83, 5_11, 5_19, 62, 20, 15_15, 7_64, 20, 1_49, 2_61, 56_25, 79_72, 20, 55_40, 5_67, 12_76, 93, 39_25, 16_75, 11, 15, 8_02, 79_72, 5_76, 2_17, 15_08, 11, 35, 93, 12_53, 24_41, 15, 2_89, 6_52, 31, 4_16, 3_21, 38_42, 1_15, 40, 9_11, 8, 4_76, 6_19, 4, 3_80, 1_42, 4_23, 3_35, 2_40, 35, 93, 2_64, 8, 11, 3_35, 5_69, 4_20, 1_63, 5, 2], [2_60, 5_48, 5_28, 4_23, 20, 4_51, 20, 26_81, 11_53, 34_34, 20, 55_40, 37, 5_67, 1_26, 12_53, 24_41, 33_76, 4_49, 2_10, 4_31, 15_63, 1_77, 7_67, 55_40, 11, 12_03, 4_72, 11, 29_53, 6_85, 2_85, 3_64, 7_06, 11_53, 20, 67_99, 20, 28_69, 20, 44_64, 1_26, 40, 24_29, 20, 10_40, 8_66, 26_64, 4_18, 20, 3_18, 20, 17_26, 1_86, 20, 2_65, 5_22, 35, 93, 21_91, 46_34, 20, 10_40, 12, 67_99, 15, 2_28, 23_56, 1_42, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_75, 26_66, 6_84, 15_82, 11_76, 12, 6_27, 1_49, 6_19, 20, 49_02, 5_63, 11, 20, 1_49, 2_61, 34_20, 23_56, 1_74, 1_42, 47_14, 1_31, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowercase_ , model_name="facebook/s2t-small-mustc-en-de-st" , revision="a14f04cf0776c02f62a8cb800cf7909e15ea23ad" , )
@require_sentencepiece
class snake_case_(unittest.TestCase):
    """Integration checks for the multilingual Speech2Text tokenizer.

    BUG FIX: class attributes and per-test locals were bound to obfuscated
    throwaway names (`__A`, `lowercase__`) while the code reads
    `cls.checkpoint_name`, `self.french_text`, `generated_ids`, etc.; the
    bindings are restored from the attributes actually read.  Method names were
    also obfuscated (and collided); descriptive names are restored.
    NOTE(review): `ES_CODE`/`FR_CODE` are expected to be module-level language
    token ids — confirm they are defined under those names.
    """

    checkpoint_name = "valhalla/s2t_mustc_multilinguial_medium"
    french_text = "C'est trop cool"
    spanish_text = "Esto es genial"

    @classmethod
    def setUpClass(cls):
        # load the shared tokenizer once for all tests in this class
        cls.tokenizer: SpeechaTextTokenizer = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name)
        return cls

    def test_lang_code_to_id(self):
        self.assertEqual(self.tokenizer.lang_code_to_id["pt"], 4)
        self.assertEqual(self.tokenizer.lang_code_to_id["ru"], 6)
        self.assertEqual(self.tokenizer.lang_code_to_id["it"], 9)
        self.assertEqual(self.tokenizer.lang_code_to_id["de"], 11)

    def test_vocab_size(self):
        self.assertEqual(self.tokenizer.vocab_size, 10_000)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(ES_CODE, self.tokenizer.all_special_ids)
        generated_ids = [ES_CODE, 4, 16_01, 47, 76_47, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_spanish = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_spanish)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_tokenizer_adds_special_tokens(self):
        self.tokenizer.tgt_lang = "fr"
        encoded = self.tokenizer(self.french_text).input_ids
        self.assertEqual(encoded[0], FR_CODE)
        self.assertEqual(encoded[-1], self.tokenizer.eos_token_id)

    def test_tgt_lang_setter(self):
        self.tokenizer.tgt_lang = "fr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [FR_CODE])
        self.tokenizer.tgt_lang = "es"
        self.assertListEqual(self.tokenizer.prefix_tokens, [ES_CODE])
| 87 | from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
UpperCamelCase = TypeVar("T")
# BUG FIX: the TypeVar was bound only to `UpperCamelCase`, while the class
# below subscripts `Generic[T]`; bind `T` as well.
T = UpperCamelCase


class snake_case_(Generic[T]):
    """Least-recently-used cache over keys of type ``T``.

    Keys are kept in a deque (most recent at the left); a companion set gives
    O(1) membership tests.

    BUG FIX: instance state was bound to throwaway names in ``__init__`` (so no
    attribute was ever created), eviction removed the *referenced* key instead
    of the popped LRU key, the capacity check read an undefined ``LRUCache``
    name, and both public methods shared one obfuscated name.  Method names
    ``refer``/``display`` are restored from the __main__ demo below.
    """

    dq_store: deque[T]  # cache store of keys, most recent first
    key_reference: set[T]  # membership index of the keys in cache
    _MAX_CAPACITY: int = 10  # default maximum capacity of cache

    def __init__(self, n: int) -> None:
        """Create a cache holding at most ``n`` keys (0/falsy -> effectively unbounded)."""
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            self._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            self._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        """Record a reference to ``x``, evicting the least-recently-used key when full."""
        if x not in self.key_reference:
            if len(self.dq_store) == self._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            # already cached: move it to the front
            self.dq_store.remove(x)
        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        """Print cached keys from most- to least-recently used."""
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # NOTE(review): `LRUCache` is not defined in this module (the class above is
    # named `snake_case_`), and the instance is bound to `UpperCamelCase` while
    # the calls below read `lru_cache` — as written this demo raises NameError.
    UpperCamelCase = LRUCache(4)
    lru_cache.refer('''A''')
    lru_cache.refer(2)
    lru_cache.refer(3)
    lru_cache.refer('''A''')
    lru_cache.refer(4)
    lru_cache.refer(5)
    # Expected state with capacity 4, most-recent first: [5, 4, 'A', 3]
    lru_cache.display()
    print(lru_cache)
    assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 87 | 1 |
from string import ascii_uppercase
UpperCamelCase = {char: i for i, char in enumerate(ascii_uppercase)}
UpperCamelCase = dict(enumerate(ascii_uppercase))
def lowercase_(message: str, key: str) -> str:
    """Cyclically extend ``key`` until it is as long as ``message``.

    BUG FIX: both parameters shared one obfuscated name (a SyntaxError) and
    the loop counters were never bound under the names (`x`, `i`) the body
    reads; names restored from the reads and the demo call below.
    """
    x = len(message)
    i = 0
    while True:
        if x == i:
            # wrap the index back to the start of the key
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key
def lowercase_(message: str, key_new: str) -> str:
    """Encrypt ``message`` (upper-case letters and spaces) with a running key.

    Each letter becomes table[(plain - key) % 26]; spaces pass through and do
    not consume key characters.

    BUG FIX: the module bound both letter<->index tables to the same name
    (`UpperCamelCase`), leaving `dicta` undefined, and both parameters shared
    one obfuscated name; the tables are rebuilt locally from
    ``ascii_uppercase`` so the function is self-contained.
    """
    char_to_idx = {char: i for i, char in enumerate(ascii_uppercase)}
    idx_to_char = dict(enumerate(ascii_uppercase))
    cipher_text = ""
    i = 0
    for letter in message:
        if letter == " ":
            cipher_text += " "
        else:
            x = (char_to_idx[letter] - char_to_idx[key_new[i]]) % 26
            i += 1
            cipher_text += idx_to_char[x]
    return cipher_text
def lowercase_(cipher_text: str, key_new: str) -> str:
    """Decrypt a running-key ciphertext back to the original message.

    Inverse of the encryption step: each letter becomes
    table[(cipher + key + 26) % 26]; spaces pass through and do not consume
    key characters.

    BUG FIX: both parameters shared one obfuscated name (a SyntaxError), the
    counters/accumulator were never bound under the names read, and the
    module-level letter tables (`dicta`) were clobbered; tables are rebuilt
    locally from ``ascii_uppercase``.
    """
    char_to_idx = {char: i for i, char in enumerate(ascii_uppercase)}
    idx_to_char = dict(enumerate(ascii_uppercase))
    or_txt = ""
    i = 0
    for letter in cipher_text:
        if letter == " ":
            or_txt += " "
        else:
            x = (char_to_idx[letter] + char_to_idx[key_new[i]] + 26) % 26
            i += 1
            or_txt += idx_to_char[x]
    return or_txt
def lowercase_ ( ):
    # Demo driver: build a running key, encrypt a sample message, then
    # round-trip it back through decryption.
    # NOTE(review): the locals are bound to `lowercase__` while the f-strings
    # below read `s`, and `generate_key`/`cipher_text`/`original_text` are not
    # defined under those names in this module — as written this raises
    # NameError.
    lowercase__ : str = "THE GERMAN ATTACK"
    lowercase__ : Tuple = "SECRET"
    lowercase__ : Optional[int] = generate_key(_lowerCamelCase , _lowerCamelCase)
    lowercase__ : Optional[Any] = cipher_text(_lowerCamelCase , _lowerCamelCase)
    print(f'''Encrypted Text = {s}''')
    print(f'''Original Text = {original_text(_lowerCamelCase , _lowerCamelCase)}''')
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # NOTE(review): `main` is not defined in this module — confirm intended entry point.
    main()
| 87 | from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json''',
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class snake_case_(__A):
    """Configuration class mirroring `transformers` ConvBertConfig.

    BUG FIX: all twenty ``__init__`` parameters shared the obfuscated name
    ``lowercase_`` (a SyntaxError) and every ``self.*`` assignment target was
    replaced by a throwaway name; parameter names and instance attributes are
    restored in declaration order from the defaults and the upstream config.
    NOTE(review): the class attribute below is presumed to be ``model_type``
    (it was obfuscated to ``__A``) — confirm.
    """

    model_type = "convbert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        embedding_size=768,
        head_ratio=2,
        conv_kernel_size=9,
        num_groups=1,
        classifier_dropout=None,
        **kwargs,
    ):
        # token ids are consumed by the base config
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout
class snake_case_ ( __A ):
    # ONNX export configuration: declares the dynamic axes for this model's
    # inputs (batch/sequence, plus a choice axis for multiple-choice tasks).
    # NOTE(review): the property name was obfuscated (upstream this is
    # `inputs`), and the axis mapping is bound to `lowercase__` while the
    # OrderedDict below reads `dynamic_axis` — as written this raises
    # NameError; confirm intended bindings.
    @property
    def __UpperCamelCase ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            lowercase__ : Tuple = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            lowercase__ : str = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ] )
| 87 | 1 |
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
UpperCamelCase = datasets.load_iris()
UpperCamelCase = np.array(data['''data'''])
UpperCamelCase = np.array(data['''target'''])
UpperCamelCase = data['''target_names''']
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = train_test_split(X, y)
def lowercase_(point_a, point_b) -> float:
    """Euclidean distance between two equal-length coordinate sequences.

    BUG FIX: both parameters shared the obfuscated name ``_lowerCamelCase``,
    which is a SyntaxError in Python.
    """
    return np.linalg.norm(np.array(point_a) - np.array(point_b))
def lowercase_(train_data, train_target, classes, point, k=5):
    """Classify ``point`` by majority vote among its ``k`` nearest training samples.

    train_data   -- iterable of coordinate sequences
    train_target -- iterable of class indices (parallel to train_data)
    classes      -- sequence of class names, indexed by class index
    point        -- coordinates of the sample to classify

    BUG FIX: all parameters shared one obfuscated name (a SyntaxError) and the
    body called a distance helper that is not defined under that name in this
    module; the Euclidean distance is computed inline instead.
    """
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = float(np.linalg.norm(np.array(data_point[0]) - np.array(point)))
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them is the class the point belongs to
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]
if __name__ == "__main__":
    # NOTE(review): `classifier` is not defined under this name in the module
    # (the KNN function above is obfuscated) — as written this demo raises
    # NameError.
    print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
| 87 | import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def lowercase_(tf_checkpoint_path: str, bert_config_file: str, pytorch_dump_path: str):
    """Convert a TensorFlow BERT checkpoint to a PyTorch state dict on disk.

    BUG FIX: all three parameters shared one obfuscated name (a SyntaxError)
    and the f-strings/locals read names (`config`, `pytorch_dump_path`) that
    were never bound; names restored from the positional call in the
    __main__ guard below.
    """
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f'''Building PyTorch model from configuration: {config}''')
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f'''Save PyTorch model to {pytorch_dump_path}''')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    # BUG FIX: the parser and parsed args were bound to `UpperCamelCase` while
    # the following lines read `parser`/`args`; bind the names actually used.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    # NOTE(review): `convert_tf_checkpoint_to_pytorch` must match the converter
    # defined above (obfuscated in this file) — confirm.
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
UpperCamelCase = logging.get_logger(__name__)
class snake_case_ ( __A ):
__A : Union[str, Any] = ["input_features", "attention_mask"]
def __init__( self : Dict , lowercase_ : Union[str, Any]=80 , lowercase_ : int=1_60_00 , lowercase_ : Dict=0.0 , lowercase_ : Optional[Any]=10 , lowercase_ : int=25 , lowercase_ : List[str]="hamming_window" , lowercase_ : Optional[int]=3_27_68.0 , lowercase_ : List[Any]=0.97 , lowercase_ : Optional[Any]=1.0 , lowercase_ : List[str]=True , lowercase_ : Optional[int]=True , lowercase_ : str=False , **lowercase_ : List[Any] , ) -> str:
super().__init__(feature_size=lowercase_ , sampling_rate=lowercase_ , padding_value=lowercase_ , **lowercase_ )
lowercase__ : Dict = feature_size
lowercase__ : str = sampling_rate
lowercase__ : Tuple = padding_value
lowercase__ : Optional[Any] = hop_length
lowercase__ : Union[str, Any] = win_length
lowercase__ : Union[str, Any] = frame_signal_scale
lowercase__ : int = preemphasis_coeff
lowercase__ : Any = mel_floor
lowercase__ : str = normalize_means
lowercase__ : Optional[int] = normalize_vars
lowercase__ : Union[str, Any] = win_function
lowercase__ : Dict = return_attention_mask
lowercase__ : Union[str, Any] = win_length * sampling_rate // 10_00
lowercase__ : Optional[int] = hop_length * sampling_rate // 10_00
lowercase__ : Tuple = optimal_fft_length(self.sample_size )
lowercase__ : str = (self.n_fft // 2) + 1
def __UpperCamelCase ( self : List[str] , lowercase_ : np.array ) -> np.ndarray:
if self.win_function == "hamming_window":
lowercase__ : Union[str, Any] = window_function(window_length=self.sample_size , name=self.win_function , periodic=lowercase_ )
else:
lowercase__ : Optional[Any] = window_function(window_length=self.sample_size , name=self.win_function )
lowercase__ : Tuple = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.feature_size , min_frequency=0.0 , max_frequency=self.sampling_rate / 2.0 , sampling_rate=self.sampling_rate , )
lowercase__ : Any = spectrogram(
one_waveform * self.frame_signal_scale , window=lowercase_ , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , center=lowercase_ , preemphasis=self.preemphasis_coeff , mel_filters=lowercase_ , mel_floor=self.mel_floor , log_mel="log" , )
return msfc_features.T
def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : Union[str, Any] ) -> int:
# make sure we normalize float32 arrays
if self.normalize_means:
lowercase__ : int = x[:input_length].mean(axis=0 )
lowercase__ : str = np.subtract(lowercase_ , lowercase_ )
if self.normalize_vars:
lowercase__ : int = x[:input_length].std(axis=0 )
lowercase__ : Optional[int] = np.divide(lowercase_ , lowercase_ )
if input_length < x.shape[0]:
lowercase__ : List[Any] = padding_value
# make sure array is in float32
lowercase__ : Tuple = x.astype(np.floataa )
return x
def __UpperCamelCase ( self : Tuple , lowercase_ : List[np.ndarray] , lowercase_ : Optional[np.ndarray] = None ) -> List[np.ndarray]:
lowercase__ : str = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [self._normalize_one(lowercase_ , lowercase_ , self.padding_value ) for x, n in zip(lowercase_ , lowercase_ )]
def __call__( self : Dict , lowercase_ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , lowercase_ : Union[bool, str, PaddingStrategy] = False , lowercase_ : Optional[int] = None , lowercase_ : bool = False , lowercase_ : Optional[int] = None , lowercase_ : Optional[bool] = None , lowercase_ : Optional[Union[str, TensorType]] = None , lowercase_ : Optional[int] = None , **lowercase_ : List[Any] , ) -> BatchFeature:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
F''' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'''
F''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
lowercase__ : Dict = isinstance(lowercase_ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
lowercase__ : Optional[Any] = is_batched_numpy or (
isinstance(lowercase_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
lowercase__ : List[str] = [np.asarray(lowercase_ , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(lowercase_ , np.ndarray ):
lowercase__ : Tuple = np.asarray(lowercase_ , dtype=np.floataa )
elif isinstance(lowercase_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowercase__ : Optional[Any] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowercase__ : Dict = [raw_speech]
# extract fbank features
lowercase__ : Union[str, Any] = [self._extract_mfsc_features(lowercase_ ) for one_waveform in raw_speech]
# convert into correct format for padding
lowercase__ : str = BatchFeature({"input_features": features} )
lowercase__ : Optional[Any] = self.pad(
lowercase_ , padding=lowercase_ , max_length=lowercase_ , truncation=lowercase_ , pad_to_multiple_of=lowercase_ , return_attention_mask=lowercase_ , **lowercase_ , )
# make sure list is in array format
lowercase__ : Any = padded_inputs.get("input_features" )
if isinstance(input_features[0] , lowercase_ ):
lowercase__ : int = [np.asarray(lowercase_ , dtype=np.floataa ) for feature in input_features]
lowercase__ : Tuple = padded_inputs.get("attention_mask" )
if attention_mask is not None:
lowercase__ : int = [np.asarray(lowercase_ , dtype=np.intaa ) for array in attention_mask]
if self.normalize_means or self.normalize_vars:
lowercase__ : Dict = (
np.array(lowercase_ , dtype=np.intaa )
if self._get_padding_strategies(lowercase_ , max_length=lowercase_ ) is not PaddingStrategy.DO_NOT_PAD
and padding
else None
)
lowercase__ : str = self.normalize(
padded_inputs["input_features"] , attention_mask=lowercase_ )
if return_tensors is not None:
lowercase__ : Tuple = padded_inputs.convert_to_tensors(lowercase_ )
return padded_inputs
| 87 | import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def lowercase_(key, default=False):
    """Read boolean flag ``key`` from the environment.

    Returns ``default`` when the variable is unset; otherwise parses it with
    ``strtobool`` (yielding 1/0), re-raising bad values with a clearer message.

    BUG FIX: both parameters shared one obfuscated name (a SyntaxError) while
    the body read `key` and `default`; the keyword `default=` is used by the
    flag definitions below, so that parameter name is required.
    """
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f'''If set, {key} must be yes or no.''')
    return _value
UpperCamelCase = parse_flag_from_env('''RUN_SLOW''', default=False)
UpperCamelCase = parse_flag_from_env('''RUN_REMOTE''', default=False)
UpperCamelCase = parse_flag_from_env('''RUN_LOCAL''', default=True)
UpperCamelCase = parse_flag_from_env('''RUN_PACKAGED''', default=True)
# Compression
UpperCamelCase = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='''test requires lz4''')
UpperCamelCase = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='''test requires py7zr''')
UpperCamelCase = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='''test requires zstandard''')
# Audio
UpperCamelCase = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('''soundfile''') is None or version.parse(importlib_metadata.version('''soundfile''')) < version.parse('''0.12.0'''),
reason='''test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ''',
)
# Beam
UpperCamelCase = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('''0.3.2'''),
reason='''test requires apache-beam and a compatible dill version''',
)
# Dill-cloudpickle compatibility
UpperCamelCase = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('''0.3.2'''),
reason='''test requires dill>0.3.2 for cloudpickle compatibility''',
)
# Windows
UpperCamelCase = pytest.mark.skipif(
sys.platform == '''win32''',
reason='''test should not be run on Windows''',
)
def lowercase_(test_case):
    """Decorator: mark ``test_case`` skipped unless `faiss` is importable.

    BUG FIX: the wrapped function was bound to a throwaway name while the
    return statement read an undefined `test_case`; the parameter is restored
    to that name and rebound.
    """
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss")(test_case)
    return test_case
def lowercase_(test_case):
    """Decorator: mark ``test_case`` skipped unless `regex` is importable.

    BUG FIX: the wrapped function was bound to a throwaway name while the
    return statement read an undefined `test_case`.
    """
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex")(test_case)
    return test_case
def lowercase_(test_case):
    """Decorator: mark ``test_case`` skipped unless `elasticsearch` is importable.

    BUG FIX: the wrapped function was bound to a throwaway name while the
    return statement read an undefined `test_case`.
    """
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch")(test_case)
    return test_case
def lowercase_(test_case):
    """Decorator: mark ``test_case`` skipped unless `sqlalchemy` is importable.

    BUG FIX: the wrapped function was bound to a throwaway name while the
    return statement read an undefined `test_case`.
    """
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy")(test_case)
    return test_case
def lowercase_(_lowerCamelCase):
    """Skip decorator: mark the test skipped when `config.TORCH_AVAILABLE` is falsy.

    BUG FIX: the original returned the undefined name ``test_case``.
    """
    if not config.TORCH_AVAILABLE:
        _lowerCamelCase = unittest.skip("test requires PyTorch")(_lowerCamelCase)
    return _lowerCamelCase
def lowercase_(_lowerCamelCase):
    """Skip decorator: mark the test skipped when `config.TF_AVAILABLE` is falsy.

    BUG FIX: the original returned the undefined name ``test_case``.
    """
    if not config.TF_AVAILABLE:
        _lowerCamelCase = unittest.skip("test requires TensorFlow")(_lowerCamelCase)
    return _lowerCamelCase
def lowercase_(_lowerCamelCase):
    """Skip decorator: mark the test skipped when `config.JAX_AVAILABLE` is falsy.

    BUG FIX: the original returned the undefined name ``test_case``.
    """
    if not config.JAX_AVAILABLE:
        _lowerCamelCase = unittest.skip("test requires JAX")(_lowerCamelCase)
    return _lowerCamelCase
def lowercase_(_lowerCamelCase):
    """Skip decorator: mark the test skipped when `config.PIL_AVAILABLE` is falsy.

    BUG FIX: the original returned the undefined name ``test_case``.
    """
    if not config.PIL_AVAILABLE:
        _lowerCamelCase = unittest.skip("test requires Pillow")(_lowerCamelCase)
    return _lowerCamelCase
def lowercase_(_lowerCamelCase):
    """Skip decorator: mark the test skipped when ``transformers`` is not importable.

    BUG FIX: on the success path the original returned the undefined name
    ``test_case`` (NameError); return the decorated test itself.
    """
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(_lowerCamelCase)
    else:
        return _lowerCamelCase
def lowercase_(_lowerCamelCase):
    """Skip decorator: mark the test skipped when ``tiktoken`` is not importable.

    BUG FIX: the success path returned the undefined name ``test_case``.
    """
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(_lowerCamelCase)
    else:
        return _lowerCamelCase
def lowercase_(_lowerCamelCase):
    """Skip decorator: mark the test skipped when ``spacy`` is not importable.

    BUG FIX: the success path returned the undefined name ``test_case``.
    """
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(_lowerCamelCase)
    else:
        return _lowerCamelCase
def lowercase_(model):
    """Return a skip decorator requiring ``spacy`` plus the given pipeline `model`.

    BUG FIX: in the original the inner parameter shadowed the model name, so
    ``spacy.load`` was called on the test case itself, and the success path
    returned the undefined name ``test_case``.
    """
    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model
def lowercase_(_lowerCamelCase):
    """Skip decorator: mark the test skipped when ``pyspark`` is not importable.

    BUG FIX: the success path returned the undefined name ``test_case``.
    """
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(_lowerCamelCase)
    else:
        return _lowerCamelCase
def lowercase_(_lowerCamelCase):
    """Skip decorator: mark the test skipped when ``joblibspark`` is not importable.

    BUG FIX: the success path returned the undefined name ``test_case``.
    """
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(_lowerCamelCase)
    else:
        return _lowerCamelCase
def lowercase_(_lowerCamelCase):
    """Skip decorator: skip unless the module flag ``_run_slow_tests`` is truthy.

    BUG FIX: the original returned the undefined name ``test_case``.
    """
    if not _run_slow_tests or _run_slow_tests == 0:
        _lowerCamelCase = unittest.skip("test is slow")(_lowerCamelCase)
    return _lowerCamelCase
def lowercase_(_lowerCamelCase):
    """Skip decorator: skip unless the module flag ``_run_local_tests`` is truthy.

    BUG FIX: the original returned the undefined name ``test_case``.
    """
    if not _run_local_tests or _run_local_tests == 0:
        _lowerCamelCase = unittest.skip("test is local")(_lowerCamelCase)
    return _lowerCamelCase
def lowercase_(_lowerCamelCase):
    """Skip decorator: skip unless the module flag ``_run_packaged_tests`` is truthy.

    BUG FIX: the original returned the undefined name ``test_case``.
    """
    if not _run_packaged_tests or _run_packaged_tests == 0:
        _lowerCamelCase = unittest.skip("test is packaged")(_lowerCamelCase)
    return _lowerCamelCase
def lowercase_(_lowerCamelCase):
    """Skip decorator: skip unless the module flag ``_run_remote_tests`` is truthy.

    BUG FIX: the original returned the undefined name ``test_case``.
    """
    if not _run_remote_tests or _run_remote_tests == 0:
        _lowerCamelCase = unittest.skip("test requires remote")(_lowerCamelCase)
    return _lowerCamelCase
def lowercase_ ( *_lowerCamelCase : str):
def decorate(cls : str):
for name, fn in cls.__dict__.items():
if callable(_lowerCamelCase) and name.startswith("test"):
for decorator in decorators:
lowercase__ : Optional[int] = decorator(_lowerCamelCase)
setattr(cls , _lowerCamelCase , _lowerCamelCase)
return cls
return decorate
class snake_case_ ( __A ):
    # Marker exception raised by the offline simulator below when a request
    # would hang with no timeout set. NOTE(review): the base `__A` is
    # obfuscated — presumably an exception type; confirm against the original.
    pass
class snake_case_ ( __A ):
    # Enum-like set of offline-simulation modes used by the context manager
    # below (connection fails / times out / HF_DATASETS_OFFLINE=1).
    # NOTE(review): obfuscation renamed all three members to `__A`, so only the
    # last assignment (2) survives on the class; the original presumably had
    # three distinct member names.
    __A : List[Any] = 0
    __A : str = 1
    __A : int = 2
@contextmanager
def lowercase_(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    """Simulate being offline for the duration of the context.

    BUG FIX / reconstruction: the obfuscated original duplicated both parameter
    names (a SyntaxError) and referenced several undefined locals
    (``online_request``, ``url``, ``timeout``, ``max_retry_error``).
    NOTE(review): reconstructed from the visible strings and patch targets —
    confirm against the original test-utility module.
    """
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                f'''Tried a call to {url} in offline mode with no timeout set. Please set a timeout.''')
        kwargs["timeout"] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f'''OfflineMock[{url}]'''),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        # presumably patched to True in the original — verify
        with patch("datasets.config.HF_DATASETS_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")
@contextmanager
def lowercase_(*args, **kwargs):
    """Run the body of the ``with`` block inside a fresh temporary working
    directory, restoring the previous working directory on exit.

    BUG FIX: the original duplicated the varargs/kwargs parameter names (a
    SyntaxError) and called ``os.chdir`` on the argument tuple instead of the
    temporary directory / the saved original directory.
    """
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            # Always restore, even if the body raised.
            os.chdir(original_working_dir)
@contextmanager
def lowercase_():
    """Assert that PyArrow's allocated memory grows while the block runs.

    BUG FIX: the original stored the baseline in a throwaway local and then
    compared against the undefined name ``previous_allocated_memory``.
    """
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def lowercase_():
    """Assert that PyArrow's allocated memory does NOT grow while the block runs.

    BUG FIX: the original stored the baseline in a throwaway local and then
    compared against the undefined name ``previous_allocated_memory``.
    """
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def lowercase_(rng1, rng2):
    """Return True iff two NumPy random generators are in the same state,
    checked by drawing the same 10 integers from deep copies so the inputs
    themselves are left untouched.

    BUG FIX: the original declared the same parameter name twice, which is a
    SyntaxError in Python.
    """
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()
def lowercase_(_lowerCamelCase):
    """Decorate a function so that HTTP 500/502 errors become pytest xfails.

    BUG FIX: the original's ``_wrapper`` declared one parameter name three
    times (a SyntaxError); the wrapper must receive the wrapped function plus
    its call arguments.
    """
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            # Server-side flakiness (500/502) is an expected failure, not a bug.
            if str(err).startswith("500") or str(err).startswith("502"):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, _lowerCamelCase)
class snake_case_ :
    """Plain record for a finished subprocess: return code plus captured
    stdout/stderr line lists.

    BUG FIX: the original ``__init__`` declared one parameter name three times
    (a SyntaxError) and bound the values to throwaway locals instead of
    attributes on ``self``.
    """

    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def lowercase_(stream, callback):
    """Read `stream` line by line, invoking `callback` per line, until EOF.

    BUG FIX: the original declared the same parameter name twice (a
    SyntaxError); names reconstructed from the body's ``stream.readline()``
    and ``callback(...)`` call sites.
    """
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def lowercase_(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False):
    """Run `cmd` as a subprocess, teeing its stdout/stderr line by line, and
    return a ``_RunOutput(returncode, stdout_lines, stderr_lines)``.

    BUG FIX: the original declared one parameter name six times (a
    SyntaxError) and bound the process and line buffers to throwaway locals
    while the body read ``p``/``sink``/``line``; names reconstructed from
    those call sites.
    """
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env,
    )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        # Decode, collect, and (unless quiet) mirror one line of child output.
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:")),
            _read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:")),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def lowercase_(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True):
    """Synchronously run `cmd` via ``_stream_subprocess`` and fail loudly.

    Raises ``RuntimeError`` on a non-zero return code (with the captured
    stderr) or when the command produced no output at all.

    BUG FIX: the original declared one parameter name six times (a
    SyntaxError) and referenced undefined locals (``cmd_str``, ``result``,
    ``stderr``); reconstructed from the body's call sites.
    """
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo))
    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n'''
            f'''The combined stderr from workers follows:\n{stderr}''')
    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f'''\'{cmd_str}\' produced no output.''')
    return result
def lowercase_():
    """Return the numeric id of the current pytest-xdist worker.

    Falls back to 0 when ``PYTEST_XDIST_WORKER`` is unset (i.e. no xdist, or
    the default worker ``gw0``).
    """
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    # Strip the leading "gw" prefix, keeping just the worker number.
    worker = re.sub(r"^gw", "", worker, 0, re.M)
    return int(worker)
def lowercase_():
    """Return a per-xdist-worker TCP port: base 29500 plus the worker id.

    BUG FIX: the original bound both values to throwaway locals and then
    returned the undefined names ``port`` and ``uniq_delta``.
    """
    port = 2_9500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
| 87 | 1 |
from timeit import timeit
def lowercase_(_lowerCamelCase: int) -> int:
    """Count the set bits of a non-negative integer using Brian Kernighan's
    trick: ``n & (n - 1)`` clears the lowest set bit, so the loop runs once
    per set bit.

    Raises ValueError for negative input.
    """
    if _lowerCamelCase < 0:
        raise ValueError("the value of input must not be negative")
    remaining, count = _lowerCamelCase, 0
    while remaining:
        remaining &= remaining - 1
        count += 1
    return count
def lowercase_(_lowerCamelCase: int) -> int:
    """Count the set bits of a non-negative integer by testing the low bit
    (``% 2``) and right-shifting until the value is exhausted.

    Raises ValueError for negative input.
    """
    if _lowerCamelCase < 0:
        raise ValueError("the value of input must not be negative")
    remaining, count = _lowerCamelCase, 0
    while remaining:
        count += remaining % 2
        remaining >>= 1
    return count
def lowercase_():
    """Benchmark the two popcount implementations above with ``timeit``.

    NOTE(review): left byte-identical — the f-strings use the ``{expr = }``
    self-documenting form, so any identifier rename would change the printed
    output. The functions referenced via ``z.get_set_bits_count_using_*`` are
    not defined under those names in this obfuscated file, and
    ``setup=_lowerCamelCase`` passes the number rather than the setup string —
    both look like mangling damage; confirm against the original.
    """
    def do_benchmark(_lowerCamelCase: int) -> None:
        # `timeit` setup re-imports this module under the alias `z`.
        lowercase__ : Dict = "import __main__ as z"
        print(f'''Benchmark when {number = }:''')
        print(f'''{get_set_bits_count_using_modulo_operator(_lowerCamelCase) = }''')
        lowercase__ : List[Any] = timeit("z.get_set_bits_count_using_modulo_operator(25)" , setup=_lowerCamelCase)
        print(f'''timeit() runs in {timing} seconds''')
        print(f'''{get_set_bits_count_using_brian_kernighans_algorithm(_lowerCamelCase) = }''')
        lowercase__ : List[str] = timeit(
            "z.get_set_bits_count_using_brian_kernighans_algorithm(25)" , setup=_lowerCamelCase , )
        print(f'''timeit() runs in {timing} seconds''')
    for number in (25, 37, 58, 0):
        do_benchmark(_lowerCamelCase)
        print()
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # NOTE(review): `benchmark` is not defined under that name in this
    # obfuscated file (the definitions above were renamed to `lowercase_`),
    # so this call will NameError as written.
    benchmark()
| 87 | import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def lowercase_(idx):
    """Return (HF name, original checkpoint name) pairs for the patch-embedding
    weights of CvT stage `idx`.

    BUG FIX: the original bound the list to a throwaway local and then
    appended to the undefined name ``embed``.
    """
    embed = []
    src = f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings'''
    dst = f'''stage{idx}.patch_embed'''
    embed.append((f'''{src}.projection.weight''', f'''{dst}.proj.weight'''))
    embed.append((f'''{src}.projection.bias''', f'''{dst}.proj.bias'''))
    embed.append((f'''{src}.normalization.weight''', f'''{dst}.norm.weight'''))
    embed.append((f'''{src}.normalization.bias''', f'''{dst}.norm.bias'''))
    return embed
def lowercase_(idx, cnt):
    """Return (HF name, original checkpoint name) pairs for attention block
    `cnt` of CvT stage `idx`, in the same order as the original flat listing.

    BUG FIX: the original bound the list to a throwaway local and then
    appended to the undefined name ``attention_weights``. The 34 repetitive
    appends are generated from tables instead of being spelled out.
    """
    attention_weights = []
    src = f'''cvt.encoder.stages.{idx}.layers.{cnt}'''
    dst = f'''stage{idx}.blocks.{cnt}'''
    # Convolutional q/k/v projections: conv weight + full batch-norm state.
    for proj, short in (("query", "q"), ("key", "k"), ("value", "v")):
        for hf_suffix, orig_suffix in (
            ("convolution.weight", "conv.weight"),
            ("normalization.weight", "bn.weight"),
            ("normalization.bias", "bn.bias"),
            ("normalization.running_mean", "bn.running_mean"),
            ("normalization.running_var", "bn.running_var"),
            ("normalization.num_batches_tracked", "bn.num_batches_tracked"),
        ):
            attention_weights.append(
                (
                    f'''{src}.attention.attention.convolution_projection_{proj}.convolution_projection.{hf_suffix}''',
                    f'''{dst}.attn.conv_proj_{short}.{orig_suffix}''',
                ))
    # Linear q/k/v projections.
    for proj, short in (("query", "q"), ("key", "k"), ("value", "v")):
        for param in ("weight", "bias"):
            attention_weights.append(
                (
                    f'''{src}.attention.attention.projection_{proj}.{param}''',
                    f'''{dst}.attn.proj_{short}.{param}''',
                ))
    # Attention output projection, MLP, and the two layer norms.
    for hf_mod, orig_mod in (
        ("attention.output.dense", "attn.proj"),
        ("intermediate.dense", "mlp.fc1"),
        ("output.dense", "mlp.fc2"),
        ("layernorm_before", "norm1"),
        ("layernorm_after", "norm2"),
    ):
        for param in ("weight", "bias"):
            attention_weights.append((f'''{src}.{hf_mod}.{param}''', f'''{dst}.{orig_mod}.{param}'''))
    return attention_weights
def lowercase_(idx):
    """Return the (HF name, original checkpoint name) pair for the cls token
    of CvT stage `idx`.

    BUG FIX: the original bound the list to a throwaway local and then
    appended to the undefined name ``token``.
    """
    token = []
    token.append((f'''cvt.encoder.stages.{idx}.cls_token''', "stage2.cls_token"))
    return token
def lowercase_():
    """Return (HF name, original checkpoint name) pairs for the final norm
    and classification head.

    BUG FIX: the original bound the list to a throwaway local and then
    appended to the undefined name ``head``.
    """
    head = []
    head.append(("layernorm.weight", "norm.weight"))
    head.append(("layernorm.bias", "norm.bias"))
    head.append(("classifier.weight", "head.weight"))
    head.append(("classifier.bias", "head.bias"))
    return head
def lowercase_(cvt_model, image_size, cvt_file_name, pytorch_dump_folder):
    """Convert an original CvT checkpoint file into HF format and save both the
    model and image processor to `pytorch_dump_folder`.

    BUG FIX / reconstruction: the obfuscated original repeated one parameter
    name four times (a SyntaxError) and discarded every intermediate value
    into throwaway locals. NOTE(review): variable roles reconstructed from the
    surviving call sites — confirm against the original conversion script.
    """
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1000
    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)
    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]
    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    image_processor.size["shortest_edge"] = image_size  # presumably a size dict — verify
    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))

    huggingface_weights = OrderedDict()
    list_of_state_dict = []
    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)
    list_of_state_dict = list_of_state_dict + final()

    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]

    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder)
    image_processor.save_pretrained(pytorch_dump_folder)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
# NOTE(review): obfuscation renamed the parser binding to `UpperCamelCase`,
# so the later references to `parser`, `args`, and `convert_cvt_checkpoint`
# are undefined names as written; confirm against the original script.
if __name__ == "__main__":
    UpperCamelCase = argparse.ArgumentParser()
    parser.add_argument(
        '''--cvt_model''',
        default='''cvt-w24''',
        type=str,
        help='''Name of the cvt model you\'d like to convert.''',
    )
    parser.add_argument(
        '''--image_size''',
        default=384,
        type=int,
        help='''Input Image Size''',
    )
    parser.add_argument(
        '''--cvt_file_name''',
        default=R'''cvtmodels\CvT-w24-384x384-IN-22k.pth''',
        type=str,
        help='''Input Image Size''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
    )
    UpperCamelCase = parser.parse_args()
    convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 87 | 1 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger and the pretrained-config archive map for SEW.
# NOTE(review): both assignments rebind the same obfuscated name.
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
    '''asapp/sew-tiny-100k''': '''https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json''',
    # See all SEW models at https://huggingface.co/models?filter=sew
}
class snake_case_ ( __A ):
    """Configuration class for a SEW speech model.

    BUG FIX: the obfuscated ``__init__`` repeated one parameter name dozens of
    times (a SyntaxError) and bound every value to a throwaway local instead
    of setting attributes on ``self`` — so ``self.conv_dim`` etc., which the
    class itself reads, were never defined. Parameter names reconstructed
    from the surviving attribute reads and error message.
    NOTE(review): base class `__A` is obfuscated — presumably a
    PretrainedConfig; confirm against the original module.
    """

    __A : Dict = "sew"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=7_68,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=30_72,
        squeeze_factor=2,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1E-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 1_28, 1_28, 1_28, 1_28, 2_56, 2_56, 2_56, 2_56, 5_12, 5_12, 5_12, 5_12),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=1_28,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=2_56,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        # The three conv specs must describe the same number of layers.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect."
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
                F'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)'''
                F'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def __UpperCamelCase(self) -> int:
        # Overall downsampling factor of the conv feature extractor
        # (product of the stride at each conv layer).
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 87 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy import structure for the ELECTRA model family: framework-specific
# submodules are only imported on first attribute access, via the
# `_LazyModule` instantiated at the bottom of this file.
# NOTE(review): obfuscation rebound every structure name to `UpperCamelCase`,
# so only the last assignment survives at runtime; in the original each line
# presumably extended a single `_import_structure` dict.
UpperCamelCase = {
    '''configuration_electra''': ['''ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ElectraConfig''', '''ElectraOnnxConfig'''],
    '''tokenization_electra''': ['''ElectraTokenizer'''],
}
# Fast (Rust-backed) tokenizer is optional.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase = ['''ElectraTokenizerFast''']
# PyTorch implementations are optional.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase = [
        '''ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''ElectraForCausalLM''',
        '''ElectraForMaskedLM''',
        '''ElectraForMultipleChoice''',
        '''ElectraForPreTraining''',
        '''ElectraForQuestionAnswering''',
        '''ElectraForSequenceClassification''',
        '''ElectraForTokenClassification''',
        '''ElectraModel''',
        '''ElectraPreTrainedModel''',
        '''load_tf_weights_in_electra''',
    ]
# TensorFlow implementations are optional.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase = [
        '''TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFElectraForMaskedLM''',
        '''TFElectraForMultipleChoice''',
        '''TFElectraForPreTraining''',
        '''TFElectraForQuestionAnswering''',
        '''TFElectraForSequenceClassification''',
        '''TFElectraForTokenClassification''',
        '''TFElectraModel''',
        '''TFElectraPreTrainedModel''',
    ]
# Flax implementations are optional.
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase = [
        '''FlaxElectraForCausalLM''',
        '''FlaxElectraForMaskedLM''',
        '''FlaxElectraForMultipleChoice''',
        '''FlaxElectraForPreTraining''',
        '''FlaxElectraForQuestionAnswering''',
        '''FlaxElectraForSequenceClassification''',
        '''FlaxElectraForTokenClassification''',
        '''FlaxElectraModel''',
        '''FlaxElectraPreTrainedModel''',
    ]
# During static type checking, import everything eagerly so annotations resolve.
if TYPE_CHECKING:
    from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
    from .tokenization_electra import ElectraTokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_electra_fast import ElectraTokenizerFast
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_electra import (
            ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            ElectraForCausalLM,
            ElectraForMaskedLM,
            ElectraForMultipleChoice,
            ElectraForPreTraining,
            ElectraForQuestionAnswering,
            ElectraForSequenceClassification,
            ElectraForTokenClassification,
            ElectraModel,
            ElectraPreTrainedModel,
            load_tf_weights_in_electra,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_electra import (
            TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFElectraForMaskedLM,
            TFElectraForMultipleChoice,
            TFElectraForPreTraining,
            TFElectraForQuestionAnswering,
            TFElectraForSequenceClassification,
            TFElectraForTokenClassification,
            TFElectraModel,
            TFElectraPreTrainedModel,
        )
    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_electra import (
            FlaxElectraForCausalLM,
            FlaxElectraForMaskedLM,
            FlaxElectraForMultipleChoice,
            FlaxElectraForPreTraining,
            FlaxElectraForQuestionAnswering,
            FlaxElectraForSequenceClassification,
            FlaxElectraForTokenClassification,
            FlaxElectraModel,
            FlaxElectraPreTrainedModel,
        )
# At runtime, replace this module with a lazy proxy.
else:
    import sys
    UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 87 | 1 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
# Module logger and the pretrained-config archive map for BlenderbotSmall.
# NOTE(review): both assignments rebind the same obfuscated name.
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
    '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json''',
    # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class snake_case_ ( __A ):
    """Configuration class for a BlenderbotSmall seq2seq model.

    BUG FIX: the obfuscated ``__init__`` repeated one parameter name for every
    argument (a SyntaxError) and bound every value to a throwaway local
    instead of setting attributes on ``self``. Parameter names reconstructed
    from the surviving right-hand sides of the assignments.
    NOTE(review): all three class attributes were renamed to `__A`, so only
    the last assignment survives on the class; base `__A` is presumably a
    PretrainedConfig — confirm against the original module.
    """

    __A : List[Any] = "blenderbot-small"
    __A : Tuple = ["past_key_values"]
    __A : Union[str, Any] = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=5_02_65,
        max_position_embeddings=5_12,
        encoder_layers=8,
        encoder_ffn_dim=20_48,
        encoder_attention_heads=16,
        decoder_layers=8,
        decoder_ffn_dim=20_48,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=5_12,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=1,
        scale_embedding=False,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs, )
class snake_case_ ( __A ):
    """ONNX export configuration for a seq2seq model (obfuscated).

    NOTE(review): every method below is named ``__UpperCamelCase``, so within
    the class body each later definition shadows the earlier ones, and the
    ``@property`` decorators / ``super()`` calls only make sense relative to
    the original (un-mangled) names.  Several signatures also repeat the
    parameter name ``lowercase_`` (SyntaxError).  Code is left byte-identical;
    only comments were added.
    """

    @property
    def __UpperCamelCase ( self : Any ) -> Mapping[str, Mapping[int, str]]:
        """Describe the dynamic axes of the exported model's inputs.

        Three task families are handled: seq2seq (encoder + decoder inputs,
        optionally with past key/values), causal-lm (past key/values only),
        and plain sequence-classification / question-answering inputs.
        """
        if self.task in ["default", "seq2seq-lm"]:
            lowercase__ : str = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ] )
            if self.use_past:
                # With a KV cache the decoder only receives the newest token,
                # so its sequence axis collapses / becomes "past + sequence".
                lowercase__ : Tuple = {0: "batch"}
                lowercase__ : Any = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                lowercase__ : Dict = {0: "batch", 1: "decoder_sequence"}
                lowercase__ : Tuple = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(lowercase_ , direction="inputs" )
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            lowercase__ : Optional[int] = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ] )
            if self.use_past:
                lowercase__ , lowercase__ : Any = self.num_layers
                for i in range(lowercase_ ):
                    # Past key/value tensors are dynamic along axis 2 (time).
                    lowercase__ : List[str] = {0: "batch", 2: "past_sequence + sequence"}
                    lowercase__ : Any = {0: "batch", 2: "past_sequence + sequence"}
        else:
            lowercase__ : int = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ] )
        return common_inputs

    @property
    def __UpperCamelCase ( self : str ) -> Mapping[str, Mapping[int, str]]:
        """Describe the dynamic axes of the exported model's outputs,
        extending the base-class mapping with past key/values when enabled."""
        if self.task in ["default", "seq2seq-lm"]:
            lowercase__ : Dict = super().outputs
        else:
            # Presumably skips the seq2seq base class and reuses the plain
            # OnnxConfigWithPast outputs — TODO confirm MRO intent.
            lowercase__ : List[str] = super(lowercase_ , self ).outputs
            if self.use_past:
                lowercase__ , lowercase__ : Optional[Any] = self.num_layers
                for i in range(lowercase_ ):
                    lowercase__ : Dict = {0: "batch", 2: "past_sequence + sequence"}
                    lowercase__ : List[Any] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def __UpperCamelCase ( self : Tuple , lowercase_ : PreTrainedTokenizer , lowercase_ : int = -1 , lowercase_ : int = -1 , lowercase_ : bool = False , lowercase_ : Optional[TensorType] = None , ) -> Mapping[str, Any]:
        """Build dummy encoder+decoder inputs (default / seq2seq-lm tasks),
        optionally appending zero-filled past key/value tensors."""
        lowercase__ : Optional[int] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
        # Generate decoder inputs
        # With a KV cache the decoder receives a single new token.
        lowercase__ : str = seq_length if not self.use_past else 1
        lowercase__ : Optional[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
        lowercase__ : Union[str, Any] = {F'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
        lowercase__ : Union[str, Any] = dict(**lowercase_ , **lowercase_ )
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
            else:
                import torch
            lowercase__ , lowercase__ : Union[str, Any] = common_inputs["input_ids"].shape
            lowercase__ : Optional[int] = common_inputs["decoder_input_ids"].shape[1]
            lowercase__ , lowercase__ : List[str] = self.num_attention_heads
            # Shape of one encoder past key/value: (batch, heads, enc_len, head_dim).
            lowercase__ : Dict = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            # +3 pads the decoder past so it is longer than the input — TODO
            # confirm why exactly 3 in the un-mangled source.
            lowercase__ : List[str] = decoder_seq_length + 3
            lowercase__ : Union[str, Any] = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            # Extend the decoder attention mask to cover the past positions.
            lowercase__ : Tuple = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(lowercase_ , lowercase_ )] , dim=1 )
            lowercase__ : Union[str, Any] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            lowercase__ , lowercase__ : List[str] = self.num_layers
            lowercase__ : List[Any] = min(lowercase_ , lowercase_ )
            lowercase__ : List[Any] = max(lowercase_ , lowercase_ ) - min_num_layers
            lowercase__ : int = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
            # Shared layers get 4 zero tensors (dec key, dec value, enc key, enc value).
            for _ in range(lowercase_ ):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(lowercase_ ),
                        torch.zeros(lowercase_ ),
                        torch.zeros(lowercase_ ),
                        torch.zeros(lowercase_ ),
                    ) )
            # TODO: test this.
            # Remaining layers of the deeper side get only their own pair.
            lowercase__ : str = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(lowercase_ , lowercase_ ):
                common_inputs["past_key_values"].append((torch.zeros(lowercase_ ), torch.zeros(lowercase_ )) )
        return common_inputs

    def __UpperCamelCase ( self : Optional[Any] , lowercase_ : PreTrainedTokenizer , lowercase_ : int = -1 , lowercase_ : int = -1 , lowercase_ : bool = False , lowercase_ : Optional[TensorType] = None , ) -> Mapping[str, Any]:
        """Build dummy decoder-only (causal-lm) inputs, optionally with
        zero-filled past key/values and a matching extended attention mask."""
        lowercase__ : Any = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
            else:
                import torch
            lowercase__ , lowercase__ : str = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            lowercase__ : Dict = seqlen + 2
            lowercase__ , lowercase__ : List[str] = self.num_layers
            lowercase__ , lowercase__ : Optional[Any] = self.num_attention_heads
            # (batch, heads, past_len, head_dim) for every past key/value pair.
            lowercase__ : Optional[int] = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            # Keep the mask dtype consistent with the tokenizer's output.
            lowercase__ : Optional[int] = common_inputs["attention_mask"].dtype
            lowercase__ : List[Any] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(lowercase_ , lowercase_ , dtype=lowercase_ )] , dim=1 )
            lowercase__ : Dict = [
                (torch.zeros(lowercase_ ), torch.zeros(lowercase_ )) for _ in range(lowercase_ )
            ]
        return common_inputs

    def __UpperCamelCase ( self : List[Any] , lowercase_ : PreTrainedTokenizer , lowercase_ : int = -1 , lowercase_ : int = -1 , lowercase_ : bool = False , lowercase_ : Optional[TensorType] = None , ) -> Mapping[str, Any]:
        """Tokenize a fixed dummy sentence into model inputs, resolving the
        dynamic (-1) batch / sequence dimensions to fixed defaults."""
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        lowercase__ : List[Any] = compute_effective_axis_dimension(
            lowercase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        lowercase__ : Optional[Any] = tokenizer.num_special_tokens_to_add(lowercase_ )
        lowercase__ : List[Any] = compute_effective_axis_dimension(
            lowercase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowercase_ )
        # Generate dummy inputs according to compute batch and sequence
        lowercase__ : int = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
        lowercase__ : Union[str, Any] = dict(tokenizer(lowercase_ , return_tensors=lowercase_ ) )
        return common_inputs

    def __UpperCamelCase ( self : str , lowercase_ : PreTrainedTokenizer , lowercase_ : int = -1 , lowercase_ : int = -1 , lowercase_ : bool = False , lowercase_ : Optional[TensorType] = None , ) -> Mapping[str, Any]:
        """Dispatch dummy-input generation to the task-specific helper."""
        if self.task in ["default", "seq2seq-lm"]:
            lowercase__ : str = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
                lowercase_ , batch_size=lowercase_ , seq_length=lowercase_ , is_pair=lowercase_ , framework=lowercase_ )
        elif self.task == "causal-lm":
            lowercase__ : List[str] = self._generate_dummy_inputs_for_causal_lm(
                lowercase_ , batch_size=lowercase_ , seq_length=lowercase_ , is_pair=lowercase_ , framework=lowercase_ )
        else:
            lowercase__ : Optional[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                lowercase_ , batch_size=lowercase_ , seq_length=lowercase_ , is_pair=lowercase_ , framework=lowercase_ )
        return common_inputs

    def __UpperCamelCase ( self : Tuple , lowercase_ : Tuple , lowercase_ : str , lowercase_ : Union[str, Any] , lowercase_ : List[Any] ) -> Any:
        """Flatten past key/values using the seq2seq base behavior for seq2seq
        tasks and the plain with-past behavior otherwise."""
        if self.task in ["default", "seq2seq-lm"]:
            lowercase__ : Dict = super()._flatten_past_key_values_(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
        else:
            lowercase__ : str = super(lowercase_ , self )._flatten_past_key_values_(
                lowercase_ , lowercase_ , lowercase_ , lowercase_ )
| 87 | import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case_ ( __A ,unittest.TestCase ):
    """Test suite for the LED tokenizer (slow and fast variants).

    NOTE(review): identifiers were mechanically obfuscated — all test methods
    are named ``__UpperCamelCase`` and therefore shadow each other inside the
    class body; parameter name reuse (``lowercase_``) appears throughout.
    Code left byte-identical; only comments added.
    """

    # Presumably ``tokenizer_class`` / ``rust_tokenizer_class`` /
    # ``test_rust_tokenizer`` of the TokenizerTesterMixin.
    __A : Union[str, Any] = LEDTokenizer
    __A : Union[str, Any] = LEDTokenizerFast
    __A : Optional[Any] = True

    def __UpperCamelCase ( self : List[str] ) -> Union[str, Any]:
        """Write a tiny BPE vocab and merges file into the temp dir fixture."""
        super().setUp()
        # Toy byte-level BPE vocabulary ("\u0120" is the GPT-2 space marker).
        lowercase__ : List[str] = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        lowercase__ : Optional[int] = dict(zip(lowercase_ , range(len(lowercase_ ) ) ) )
        lowercase__ : str = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        lowercase__ : Tuple = {"unk_token": "<unk>"}
        lowercase__ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        lowercase__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(lowercase_ ) + "\n" )
        with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(lowercase_ ) )

    def __UpperCamelCase ( self : int , **lowercase_ : str ) -> List[Any]:
        """Instantiate the slow tokenizer from the temp-dir fixture."""
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase_ )

    def __UpperCamelCase ( self : List[Any] , **lowercase_ : Any ) -> List[Any]:
        """Instantiate the fast (Rust) tokenizer from the temp-dir fixture."""
        kwargs.update(self.special_tokens_map )
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowercase_ )

    def __UpperCamelCase ( self : str , lowercase_ : Any ) -> Tuple:
        """Return a (input, expected-output) text pair for round-trip tests."""
        return "lower newer", "lower newer"

    @cached_property
    def __UpperCamelCase ( self : Tuple ) -> Optional[Any]:
        """Slow LED tokenizer loaded once from the hub."""
        return LEDTokenizer.from_pretrained("allenai/led-base-16384" )

    @cached_property
    def __UpperCamelCase ( self : Tuple ) -> int:
        """Fast LED tokenizer loaded once from the hub."""
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384" )

    @require_torch
    def __UpperCamelCase ( self : int ) -> List[Any]:
        """Padded batch has the expected shape and exact token ids."""
        lowercase__ : Union[str, Any] = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        lowercase__ : str = [0, 2_50, 2_51, 1_78_18, 13, 3_91_86, 19_38, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            lowercase__ : Dict = tokenizer(lowercase_ , max_length=len(lowercase_ ) , padding=lowercase_ , return_tensors="pt" )
            self.assertIsInstance(lowercase_ , lowercase_ )
            self.assertEqual((2, 9) , batch.input_ids.shape )
            self.assertEqual((2, 9) , batch.attention_mask.shape )
            lowercase__ : Union[str, Any] = batch.input_ids.tolist()[0]
            self.assertListEqual(lowercase_ , lowercase_ )

    @require_torch
    def __UpperCamelCase ( self : List[str] ) -> Tuple:
        """Encoding without targets must not emit label/decoder keys."""
        lowercase__ : Dict = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            lowercase__ : Optional[int] = tokenizer(lowercase_ , padding=lowercase_ , return_tensors="pt" )
            self.assertIn("input_ids" , lowercase_ )
            self.assertIn("attention_mask" , lowercase_ )
            self.assertNotIn("labels" , lowercase_ )
            self.assertNotIn("decoder_attention_mask" , lowercase_ )

    @require_torch
    def __UpperCamelCase ( self : Optional[Any] ) -> Any:
        """``max_length`` padding on targets produces fixed-width tensors."""
        lowercase__ : Dict = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            lowercase__ : Dict = tokenizer(text_target=lowercase_ , max_length=32 , padding="max_length" , return_tensors="pt" )
            self.assertEqual(32 , targets["input_ids"].shape[1] )

    @require_torch
    def __UpperCamelCase ( self : Optional[int] ) -> Tuple:
        """Long input is truncated to the model maximum (5122 with specials)."""
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            lowercase__ : int = tokenizer(
                ["I am a small frog" * 10_24, "I am a small frog"] , padding=lowercase_ , truncation=lowercase_ , return_tensors="pt" )
            self.assertIsInstance(lowercase_ , lowercase_ )
            self.assertEqual(batch.input_ids.shape , (2, 51_22) )

    @require_torch
    def __UpperCamelCase ( self : List[str] ) -> Any:
        """Inputs and targets both start with BOS and end with EOS."""
        lowercase__ : Union[str, Any] = ["A long paragraph for summarization."]
        lowercase__ : List[Any] = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            lowercase__ : List[Any] = tokenizer(lowercase_ , return_tensors="pt" )
            lowercase__ : Dict = tokenizer(text_target=lowercase_ , return_tensors="pt" )
            lowercase__ : Optional[int] = inputs["input_ids"]
            lowercase__ : str = targets["input_ids"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )

    @require_torch
    def __UpperCamelCase ( self : Union[str, Any] ) -> Dict:
        """``pad`` preserves a caller-supplied ``global_attention_mask``."""
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            lowercase__ : int = ["Summary of the text.", "Another summary."]
            # -1 marks positions past the original (unpadded) length.
            lowercase__ : List[str] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
            lowercase__ : Tuple = tokenizer(lowercase_ , padding=lowercase_ )
            lowercase__ : int = [[0] * len(lowercase_ ) for x in encoded_output["input_ids"]]
            lowercase__ : Any = tokenizer.pad(lowercase_ )
            self.assertSequenceEqual(outputs["global_attention_mask"] , lowercase_ )

    def __UpperCamelCase ( self : int ) -> Union[str, Any]:
        # Intentionally skipped (inherited test not applicable here).
        pass

    def __UpperCamelCase ( self : int ) -> Optional[Any]:
        """Slow and fast tokenizers agree on a sentence embedding a special token."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                lowercase__ : List[Any] = self.rust_tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
                lowercase__ : List[str] = self.tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
                lowercase__ : List[Any] = "A, <mask> AllenNLP sentence."
                lowercase__ : Tuple = tokenizer_r.encode_plus(lowercase_ , add_special_tokens=lowercase_ , return_token_type_ids=lowercase_ )
                lowercase__ : List[str] = tokenizer_p.encode_plus(lowercase_ , add_special_tokens=lowercase_ , return_token_type_ids=lowercase_ )
                self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
                self.assertEqual(
                    sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
                lowercase__ : Any = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
                lowercase__ : Optional[Any] = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
                self.assertSequenceEqual(tokens_p["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
                self.assertSequenceEqual(tokens_r["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
                self.assertSequenceEqual(
                    lowercase_ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
                self.assertSequenceEqual(
                    lowercase_ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
| 87 | 1 |
from ..utils import DummyObject, requires_backends
class snake_case_ ( metaclass=__A ):
    """Dummy placeholder class: any attempt to construct it raises an
    informative backend error via ``requires_backends`` because the
    ``speech`` backend is not installed."""

    # Backend(s) whose absence this placeholder reports.
    __A : Tuple = ["speech"]

    def __init__( self : List[str] , *args : Optional[Any] , **kwargs : str ) -> List[str]:
        # Bug fix: both variadic parameters were named ``lowercase_``, which
        # is a SyntaxError in CPython (duplicate argument name).  Variadic
        # parameter names are internal, so renaming them to ``args`` /
        # ``kwargs`` preserves the call interface for every caller.
        requires_backends(self , ["speech"] )
class snake_case_ ( metaclass=__A ):
    """Dummy placeholder class: constructing it raises an informative backend
    error via ``requires_backends`` because the ``speech`` backend is not
    installed."""

    # Backend(s) whose absence this placeholder reports.
    __A : Any = ["speech"]

    def __init__( self : Optional[Any] , *args : Optional[Any] , **kwargs : int ) -> Any:
        # Bug fix: ``*lowercase_`` and ``**lowercase_`` shared one name — a
        # duplicate-argument SyntaxError.  Renaming the variadic parameters
        # is interface-compatible.
        requires_backends(self , ["speech"] )
| 87 | import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
# NOTE(review): obfuscation collapsed two distinct module constants onto one
# name, so the second assignment shadows the first here; the bodies below
# still reference the original names (``logger``, ``TARGET_FEATURE_LENGTH``).
UpperCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCamelCase = 256
class snake_case_ ( __A ):
    """Spectrogram diffusion pipeline: encodes note tokens, denoises a mel
    spectrogram chunk-by-chunk with a DDPM scheduler, and optionally vocodes
    the result to audio with MelGAN.

    NOTE(review): identifiers were mechanically obfuscated (``lowercase__``
    assignment targets, reused ``lowercase_`` arguments); code is left
    byte-identical and only comments were added.
    """

    # Presumably ``_optional_components``.
    __A : str = ["melgan"]

    def __init__( self : str , lowercase_ : SpectrogramNotesEncoder , lowercase_ : SpectrogramContEncoder , lowercase_ : TaFilmDecoder , lowercase_ : DDPMScheduler , lowercase_ : OnnxRuntimeModel if is_onnx_available() else Any , ) -> None:
        super().__init__()
        # From MELGAN
        lowercase__ : List[Any] = math.log(1E-5 ) # Matches MelGAN training.
        lowercase__ : str = 4.0 # Largest value for most examples
        lowercase__ : Any = 1_28  # number of mel bins (``n_dims``)
        self.register_modules(
            notes_encoder=lowercase_ , continuous_encoder=lowercase_ , decoder=lowercase_ , scheduler=lowercase_ , melgan=lowercase_ , )

    def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : List[str]=(-1.0, 1.0) , lowercase_ : Dict=False ) -> Optional[Any]:
        """Linearly rescale features from [min_value, max_value] to
        ``output_range``, optionally clipping first."""
        lowercase__ , lowercase__ : int = output_range
        if clip:
            lowercase__ : Optional[Any] = torch.clip(lowercase_ , self.min_value , self.max_value )
        # Scale to [0, 1].
        lowercase__ : List[str] = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out

    def __UpperCamelCase ( self : Optional[int] , lowercase_ : List[str] , lowercase_ : List[str]=(-1.0, 1.0) , lowercase_ : List[Any]=False ) -> Union[str, Any]:
        """Inverse of the scaling above: map from ``input_range`` back to
        [min_value, max_value]."""
        lowercase__ , lowercase__ : Tuple = input_range
        lowercase__ : Optional[Any] = torch.clip(lowercase_ , lowercase_ , lowercase_ ) if clip else outputs
        # Scale to [0, 1].
        lowercase__ : Union[str, Any] = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value

    def __UpperCamelCase ( self : List[str] , lowercase_ : Any , lowercase_ : Optional[Any] , lowercase_ : Tuple ) -> List[str]:
        """Run both encoders and return their (encoding, mask) pairs.
        Token id 0 is treated as padding (mask = tokens > 0)."""
        lowercase__ : Optional[Any] = input_tokens > 0
        lowercase__ , lowercase__ : int = self.notes_encoder(
            encoder_input_tokens=lowercase_ , encoder_inputs_mask=lowercase_ )
        lowercase__ , lowercase__ : List[Any] = self.continuous_encoder(
            encoder_inputs=lowercase_ , encoder_inputs_mask=lowercase_ )
        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]

    def __UpperCamelCase ( self : Optional[Any] , lowercase_ : Optional[int] , lowercase_ : Optional[int] , lowercase_ : str ) -> Tuple:
        """One decoder forward pass at diffusion time ``noise_time``,
        normalizing the timestep into a per-batch tensor first."""
        lowercase__ : Union[str, Any] = noise_time
        if not torch.is_tensor(lowercase_ ):
            lowercase__ : Optional[Any] = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
        elif torch.is_tensor(lowercase_ ) and len(timesteps.shape ) == 0:
            lowercase__ : Optional[Any] = timesteps[None].to(input_tokens.device )
        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        lowercase__ : int = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )
        lowercase__ : str = self.decoder(
            encodings_and_masks=lowercase_ , decoder_input_tokens=lowercase_ , decoder_noise_time=lowercase_ )
        return logits

    @torch.no_grad()
    def __call__( self : List[str] , lowercase_ : List[List[int]] , lowercase_ : Optional[torch.Generator] = None , lowercase_ : int = 1_00 , lowercase_ : bool = True , lowercase_ : str = "numpy" , lowercase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowercase_ : int = 1 , ) -> Union[AudioPipelineOutput, Tuple]:
        """Generate audio (or a mel spectrogram) from batches of note tokens,
        denoising one fixed-length segment per input chunk and feeding each
        predicted segment back in as context for the next."""
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(lowercase_ , lowercase_ ) or callback_steps <= 0)
        ):
            raise ValueError(
                F'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
                F''' {type(lowercase_ )}.''' )
        lowercase__ : str = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa )
        lowercase__ : Optional[int] = np.zeros([1, 0, self.n_dims] , np.floataa )
        lowercase__ : str = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=lowercase_ , device=self.device )
        for i, encoder_input_tokens in enumerate(lowercase_ ):
            if i == 0:
                lowercase__ : Union[str, Any] = torch.from_numpy(pred_mel[:1].copy() ).to(
                    device=self.device , dtype=self.decoder.dtype )
                # The first chunk has no previous context.
                lowercase__ : List[str] = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=lowercase_ , device=self.device )
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                lowercase__ : str = ones
            lowercase__ : str = self.scale_features(
                lowercase_ , output_range=[-1.0, 1.0] , clip=lowercase_ )
            lowercase__ : str = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=lowercase_ , continuous_mask=lowercase_ , )
            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            lowercase__ : List[str] = randn_tensor(
                shape=encoder_continuous_inputs.shape , generator=lowercase_ , device=self.device , dtype=self.decoder.dtype , )
            # set step values
            self.scheduler.set_timesteps(lowercase_ )
            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
                lowercase__ : Optional[int] = self.decode(
                    encodings_and_masks=lowercase_ , input_tokens=lowercase_ , noise_time=t / self.scheduler.config.num_train_timesteps , )
                # Compute previous output: x_t -> x_t-1
                lowercase__ : Optional[Any] = self.scheduler.step(lowercase_ , lowercase_ , lowercase_ , generator=lowercase_ ).prev_sample
            lowercase__ : Tuple = self.scale_to_features(lowercase_ , input_range=[-1.0, 1.0] )
            # Keep the new segment as context for the next chunk and append it
            # to the running full-song prediction.
            lowercase__ : List[str] = mel[:1]
            lowercase__ : Optional[int] = mel.cpu().float().numpy()
            lowercase__ : str = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(lowercase_ , lowercase_ )
            logger.info("Generated segment" , lowercase_ )
        if output_type == "numpy" and not is_onnx_available():
            raise ValueError(
                "Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'." )
        elif output_type == "numpy" and self.melgan is None:
            raise ValueError(
                "Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'." )
        if output_type == "numpy":
            lowercase__ : Union[str, Any] = self.melgan(input_features=full_pred_mel.astype(np.floataa ) )
        else:
            lowercase__ : Dict = full_pred_mel
        if not return_dict:
            return (output,)
        return AudioPipelineOutput(audios=lowercase_ )
| 87 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): two distinct constants (presumably the module logger and the
# pretrained-config URL map) were both obfuscated to ``UpperCamelCase``, so
# the second assignment shadows the first.
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
    '''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/config.json''',
    '''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json''',
    '''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/config.json''',
    '''funnel-transformer/medium-base''': '''https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json''',
    '''funnel-transformer/intermediate''': (
        '''https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json'''
    ),
    '''funnel-transformer/intermediate-base''': (
        '''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json'''
    ),
    '''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/config.json''',
    '''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json''',
    '''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json''',
    '''funnel-transformer/xlarge-base''': '''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json''',
}
class snake_case_ ( __A ):
    """Hyperparameter container for a Funnel Transformer model.

    NOTE(review): obfuscation damage in this block —
    * ``__init__`` repeats the parameter name ``lowercase_`` (SyntaxError)
      while the body reads the original names (``vocab_size`` etc.);
    * the properties below are all named ``__UpperCamelCase`` (name-mangled),
      yet the setter decorators reference ``num_hidden_layers`` /
      ``num_blocks``, names that no longer exist at class-body evaluation
      time — a NameError in the un-mangled sense.  Code left byte-identical.
    """

    # Presumably ``model_type`` and ``attribute_map``.
    __A : Optional[int] = "funnel"
    __A : Optional[int] = {
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
    }

    def __init__( self : Union[str, Any] , lowercase_ : str=3_05_22 , lowercase_ : Optional[Any]=[4, 4, 4] , lowercase_ : int=None , lowercase_ : List[Any]=2 , lowercase_ : List[str]=7_68 , lowercase_ : Any=12 , lowercase_ : List[str]=64 , lowercase_ : Optional[int]=30_72 , lowercase_ : Optional[int]="gelu_new" , lowercase_ : int=0.1 , lowercase_ : Optional[Any]=0.1 , lowercase_ : List[Any]=0.0 , lowercase_ : List[str]=0.1 , lowercase_ : List[Any]=None , lowercase_ : List[Any]=1E-9 , lowercase_ : Dict="mean" , lowercase_ : Dict="relative_shift" , lowercase_ : Optional[Any]=True , lowercase_ : List[str]=True , lowercase_ : Dict=True , **lowercase_ : List[Any] , ) -> int:
        lowercase__ : List[str] = vocab_size
        lowercase__ : str = block_sizes
        # Default: each block repeated once.
        lowercase__ : int = [1] * len(lowercase_ ) if block_repeats is None else block_repeats
        # NOTE(review): validation via ``assert`` is stripped under ``-O``.
        assert len(lowercase_ ) == len(
            self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length."
        lowercase__ : Any = num_decoder_layers
        lowercase__ : List[str] = d_model
        lowercase__ : int = n_head
        lowercase__ : Union[str, Any] = d_head
        lowercase__ : Tuple = d_inner
        lowercase__ : Union[str, Any] = hidden_act
        lowercase__ : Union[str, Any] = hidden_dropout
        lowercase__ : str = attention_dropout
        lowercase__ : Tuple = activation_dropout
        lowercase__ : Optional[Any] = initializer_range
        lowercase__ : List[Any] = initializer_std
        lowercase__ : Optional[Any] = layer_norm_eps
        assert pooling_type in [
            "mean",
            "max",
        ], F'''Got {pooling_type} for `pooling_type` but only \'mean\' and \'max\' are supported.'''
        lowercase__ : List[str] = pooling_type
        assert attention_type in [
            "relative_shift",
            "factorized",
        ], F'''Got {attention_type} for `attention_type` but only \'relative_shift\' and \'factorized\' are supported.'''
        lowercase__ : str = attention_type
        lowercase__ : int = separate_cls
        lowercase__ : Dict = truncate_seq
        lowercase__ : str = pool_q_only
        super().__init__(**lowercase_ )

    @property
    def __UpperCamelCase ( self : int ) -> Optional[Any]:
        """Total hidden layers = sum over all block sizes (presumably the
        original ``num_hidden_layers`` property)."""
        return sum(self.block_sizes )

    @num_hidden_layers.setter
    def __UpperCamelCase ( self : Optional[Any] , lowercase_ : List[Any] ) -> int:
        # Setting is deliberately unsupported — layer count derives from block_sizes.
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`." )

    @property
    def __UpperCamelCase ( self : Any ) -> Dict:
        """Number of blocks (presumably the original ``num_blocks`` property)."""
        return len(self.block_sizes )

    @num_blocks.setter
    def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : Tuple ) -> Optional[int]:
        # Same rationale: derived, not settable.
        raise NotImplementedError("This model does not support the setting of `num_blocks`. Please set `block_sizes`." )
| 87 | import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class snake_case_ ( unittest.TestCase ):
    """Tests for the zero-shot audio classification pipeline (CLAP-based).

    NOTE(review): method names are all obfuscated to ``__UpperCamelCase`` and
    therefore shadow each other in the class body; the label strings contain
    the typo "vaccum", but they are runtime data fed to the model and matched
    in the expected outputs, so they must not be changed here.
    """

    @require_torch
    def __UpperCamelCase ( self : Optional[int] ) -> List[Any]:
        """Smoke test against a tiny untrained checkpoint (scores ≈ 0.5/0.5)."""
        lowercase__ : Union[str, Any] = pipeline(
            task="zero-shot-audio-classification" , model="hf-internal-testing/tiny-clap-htsat-unfused" )
        lowercase__ : List[str] = load_dataset("ashraq/esc50" )
        lowercase__ : List[Any] = dataset["train"]["audio"][-1]["array"]
        lowercase__ : Dict = audio_classifier(lowercase_ , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] )
        self.assertEqual(
            nested_simplify(lowercase_ ) , [{"score": 0.5_01, "label": "Sound of a dog"}, {"score": 0.4_99, "label": "Sound of vaccum cleaner"}] , )

    @unittest.skip("No models are available in TF" )
    def __UpperCamelCase ( self : str ) -> Optional[int]:
        pass

    @slow
    @require_torch
    def __UpperCamelCase ( self : List[str] ) -> int:
        """Full checkpoint: single input, batched list, and explicit batch_size
        all yield the same confident dog classification."""
        lowercase__ : Tuple = pipeline(
            task="zero-shot-audio-classification" , model="laion/clap-htsat-unfused" , )
        # This is an audio of a dog
        lowercase__ : Union[str, Any] = load_dataset("ashraq/esc50" )
        lowercase__ : Tuple = dataset["train"]["audio"][-1]["array"]
        lowercase__ : List[Any] = audio_classifier(lowercase_ , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] )
        self.assertEqual(
            nested_simplify(lowercase_ ) , [
                {"score": 0.9_99, "label": "Sound of a dog"},
                {"score": 0.0_01, "label": "Sound of vaccum cleaner"},
            ] , )
        lowercase__ : int = audio_classifier([audio] * 5 , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] )
        self.assertEqual(
            nested_simplify(lowercase_ ) , [
                [
                    {"score": 0.9_99, "label": "Sound of a dog"},
                    {"score": 0.0_01, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5 , )
        lowercase__ : Tuple = audio_classifier(
            [audio] * 5 , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] , batch_size=5 )
        self.assertEqual(
            nested_simplify(lowercase_ ) , [
                [
                    {"score": 0.9_99, "label": "Sound of a dog"},
                    {"score": 0.0_01, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5 , )

    @unittest.skip("No models are available in TF" )
    def __UpperCamelCase ( self : Union[str, Any] ) -> Dict:
        pass
| 87 | 1 |
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
# BibTeX citations for TER (Snover et al., 2006) and sacrebleu (Post, 2018).
# NOTE(review): these three constants were all bound to the same name
# (`UpperCamelCase`) and clobbered each other, while the metric class below
# references `_CITATION`, `_DESCRIPTION` and `_KWARGS_DESCRIPTION`; the
# referenced names are restored here.
_CITATION = '''\
@inproceedings{snover-etal-2006-study,
    title = "A Study of Translation Edit Rate with Targeted Human Annotation",
    author = "Snover, Matthew  and
      Dorr, Bonnie  and
      Schwartz, Rich  and
      Micciulla, Linnea  and
      Makhoul, John",
    booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",
    month = aug # " 8-12",
    year = "2006",
    address = "Cambridge, Massachusetts, USA",
    publisher = "Association for Machine Translation in the Americas",
    url = "https://aclanthology.org/2006.amta-papers.25",
    pages = "223--231",
}
@inproceedings{post-2018-call,
    title = "A Call for Clarity in Reporting {BLEU} Scores",
    author = "Post, Matt",
    booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
    month = oct,
    year = "2018",
    address = "Belgium, Brussels",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/W18-6319",
    pages = "186--191",
}
'''

# Human-readable summary of the metric shown on the metric card.
_DESCRIPTION = '''\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
'''

# Args/returns/examples documentation injected into the metric docstring via
# `add_start_docstrings` on the class below.
_KWARGS_DESCRIPTION = '''
Produces TER scores alongside the number of edits and reference length.
Args:
    predictions (list of str): The system stream (a sequence of segments).
    references (list of list of str): A list of one or more reference streams (each a sequence of segments).
    normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
    ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
    support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
        as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
        Only applies if `normalized = True`. Defaults to `False`.
    case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
    \'score\' (float): TER score (num_edits / sum_ref_lengths * 100)
    \'num_edits\' (int): The cumulative number of edits
    \'ref_length\' (float): The cumulative average reference length
Examples:
    Example 1:
        >>> predictions = ["does this sentence match??",
        ...                     "what about this sentence?",
        ...                     "What did the TER metric user say to the developer?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
        ...             ["Your jokes are...", "...TERrible"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                         references=references,
        ...                         case_sensitive=True)
        >>> print(results)
        {\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}
    Example 2:
        >>> predictions = ["does this sentence match??",
        ...                     "what about this sentence?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                         references=references,
        ...                         case_sensitive=True)
        >>> print(results)
        {\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}
    Example 3:
        >>> predictions = ["does this sentence match??",
        ...                     "what about this sentence?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                         references=references,
        ...                         normalized=True,
        ...                         case_sensitive=True)
        >>> print(results)
        {\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}
    Example 4:
        >>> predictions = ["does this sentence match??",
        ...                     "what about this sentence?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                         references=references,
        ...                         ignore_punct=True,
        ...                         case_sensitive=False)
        >>> print(results)
        {\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}
    Example 5:
        >>> predictions = ["does this sentence match??",
        ...                     "what about this sentence?",
        ...                     "What did the TER metric user say to the developer?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
        ...             ["Your jokes are...", "...TERrible"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                         references=references,
        ...                         ignore_punct=True,
        ...                         case_sensitive=False)
        >>> print(results)
        {\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class snake_case_ ( datasets.Metric ):
    """TER (Translation Edit Rate) metric, backed by sacrebleu's TER implementation.

    NOTE(review): the two methods were both obfuscated to the same name and the
    compute method had duplicate parameter names (a SyntaxError); the standard
    `datasets.Metric` hooks `_info`/`_compute` are restored — the first returns a
    `datasets.MetricInfo`, the second the score dict, matching their bodies.
    """

    def _info( self ):
        # sacrebleu grew the TER API used below in 1.4.12; fail fast otherwise.
        if version.parse(scb.__version__ ) < version.parse("1.4.12" ):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                "You can install it with `pip install \"sacrebleu>=1.4.12\"`." )
        return datasets.MetricInfo(
            description=_DESCRIPTION ,
            citation=_CITATION ,
            homepage="http://www.cs.umd.edu/~snover/tercom/" ,
            inputs_description=_KWARGS_DESCRIPTION ,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string" , id="sequence" ),
                    "references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ),
                } ) ,
            codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"] ,
            reference_urls=[
                "https://github.com/jhclark/tercom",
            ] , )

    def _compute(
        self ,
        predictions ,
        references ,
        normalized : bool = False ,
        ignore_punct : bool = False ,
        support_zh_ja_chars : bool = False ,
        case_sensitive : bool = False ,
    ):
        """Compute TER; `references` is one list of references per prediction."""
        references_per_prediction = len(references[0] )
        if any(len(refs ) != references_per_prediction for refs in references ):
            raise ValueError("Sacrebleu requires the same number of references for each prediction" )
        # sacrebleu wants the transpose: one stream per reference index.
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction )]
        sb_ter = TER(
            normalized=normalized , no_punct=ignore_punct , asian_support=support_zh_ja_chars , case_sensitive=case_sensitive , )
        output = sb_ter.corpus_score(predictions , transformed_references )
        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 87 | import operator
def lowercase_ ( _lowerCamelCase : list , _lowerCamelCase : bool = False , _lowerCamelCase : list | None = None):
lowercase__ : int = operator.lt if reverse else operator.gt
lowercase__ : str = solution or []
if not arr:
return solution
lowercase__ : List[str] = [arr.pop(0)]
for i, item in enumerate(_lowerCamelCase):
if _operator(_lowerCamelCase , sublist[-1]):
sublist.append(_lowerCamelCase)
arr.pop(_lowerCamelCase)
# merging sublist into solution list
if not solution:
solution.extend(_lowerCamelCase)
else:
while sublist:
lowercase__ : str = sublist.pop(0)
for i, xx in enumerate(_lowerCamelCase):
if not _operator(_lowerCamelCase , _lowerCamelCase):
solution.insert(_lowerCamelCase , _lowerCamelCase)
break
else:
solution.append(_lowerCamelCase)
strand_sort(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
| 87 | 1 |
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def lowercase_(main_process_only: bool = True, *args, **kwargs):
    """Drop-in `tqdm` wrapper that, by default, only shows the progress bar on
    the main local process.

    Extra positional/keyword arguments are forwarded to `tqdm.auto.tqdm`.
    """
    if not is_tqdm_available():
        raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.")
    disable = False
    if main_process_only:
        # Disable the bar on every process EXCEPT the local main one. The
        # original compared `== 0`, which silenced the bar on the main process
        # and showed it everywhere else — the inverse of `main_process_only`.
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
| 87 | import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = R'''
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
or scores for each vocabulary token after SoftMax.
kwargs (`Dict[str, Any]`, *optional*):
Additional stopping criteria specific kwargs.
Return:
`bool`. `False` indicates we should continue, `True` indicates we should stop.
'''
class snake_case_ ( __A ):
    """Abstract base class for generation stopping criteria.

    Subclasses implement `__call__(input_ids, scores)` and return True when
    generation should stop. (Parameter names follow the shared docstring above;
    the original signature repeated one name, a SyntaxError.)
    """

    @add_start_docstrings(lowercase_ )
    def __call__( self , input_ids: torch.LongTensor , scores: torch.FloatTensor , **kwargs ) -> bool:
        # The base class only defines the interface.
        raise NotImplementedError("StoppingCriteria needs to be subclassed" )
class snake_case_ ( __A ):
    """Stops generation once the sequence reaches `max_length` tokens."""

    def __init__( self , max_length: int , max_position_embeddings: Optional[int] = None ) -> None:
        # The original bound these to throwaway locals; `__call__` reads them
        # from `self`, so they must be instance attributes.
        self.max_length = max_length
        self.max_position_embeddings = max_position_embeddings

    @add_start_docstrings(lowercase_ )
    def __call__( self , input_ids: torch.LongTensor , scores: torch.FloatTensor , **kwargs ) -> bool:
        cur_len = input_ids.shape[-1]
        is_done = cur_len >= self.max_length
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            # NOTE(review): `logger` does not appear to be defined at module
            # level in this file (the logger binding is clobbered) — verify.
            logger.warning_once(
                "This is a friendly reminder - the current text generation call will exceed the model's predefined "
                F'''maximum length ({self.max_position_embeddings}). Depending on the model, you may observe '''
                "exceptions, performance degradation, or nothing at all." )
        return is_done
class snake_case_ ( __A ):
    """Deprecated criterion: stops after `max_new_tokens` newly generated tokens.

    Prefer a max-length criterion with `max_length = start_length + max_new_tokens`.
    """

    def __init__( self , start_length: int , max_new_tokens: int ) -> None:
        # The original passed an undefined name as the warning category;
        # a deprecation notice takes FutureWarning.
        warnings.warn(
            "The class `MaxNewTokensCriteria` is deprecated. "
            F'''Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` '''
            "with `max_length = start_length + max_new_tokens` instead." , FutureWarning , )
        self.start_length = start_length
        self.max_new_tokens = max_new_tokens
        self.max_length = start_length + max_new_tokens

    @add_start_docstrings(lowercase_ )
    def __call__( self , input_ids: torch.LongTensor , scores: torch.FloatTensor , **kwargs ) -> bool:
        return input_ids.shape[-1] >= self.max_length
class snake_case_ ( __A ):
    """Stops generation once wall-clock time exceeds `max_time` seconds."""

    def __init__( self , max_time: float , initial_timestamp: Optional[float] = None ) -> None:
        self.max_time = max_time
        # Default the reference timestamp to "now" so the timer starts at
        # construction time unless a timestamp is supplied.
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp

    @add_start_docstrings(lowercase_ )
    def __call__( self , input_ids: torch.LongTensor , scores: torch.FloatTensor , **kwargs ) -> bool:
        return time.time() - self.initial_timestamp > self.max_time
class snake_case_ ( __A ):
    """A list of stopping criteria; generation stops when ANY criterion fires."""

    @add_start_docstrings(lowercase_ )
    def __call__( self , input_ids: torch.LongTensor , scores: torch.FloatTensor , **kwargs ) -> bool:
        return any(criteria(input_ids , scores ) for criteria in self )

    @property
    def max_length( self ) -> Optional[int]:
        # Named `max_length` because the validation helper below reads it.
        # NOTE(review): the original presumably isinstance-checked the two
        # length-based criteria classes; those class names are not resolvable
        # in this file, so duck-typing on a `max_length` attribute is used —
        # verify against the upstream implementation.
        for stopping_criterium in self:
            max_length = getattr(stopping_criterium , "max_length" , None )
            if max_length is not None:
                return max_length
        return None
def lowercase_ ( stopping_criteria : "StoppingCriteriaList" , max_length : int ):
    """Return a deep copy of `stopping_criteria` validated against `max_length`.

    Warns when the criteria already enforce a different max length (the
    criteria win); appends a max-length criterion when none is present.

    NOTE(review): the original passed the int `max_length` as the
    `warnings.warn` category (a TypeError at runtime); a proper category is
    used here. The annotation is quoted because `StoppingCriteriaList` is not
    resolvable by that name in this file.
    """
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria )
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter" , UserWarning )
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length ) )
    return new_stopping_criteria
| 87 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class snake_case_ ( __A ):
    """PipelineTool that builds a segmentation mask for an image from a text label.

    NOTE(review): the class attributes below all share the name `__A`, so each
    assignment overwrites the previous one, and the three pipeline methods all
    share one name as well (only the last survives). The originals presumably
    used the PipelineTool attribute/method names (description, name,
    model_class, inputs, outputs / encode, forward, decode) — verify; names are
    left as found to avoid guessing the framework contract.
    """

    __A : Tuple = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements what should be identified in the segmentation mask. The tool returns the mask."
    )
    __A : str = "CIDAS/clipseg-rd64-refined"
    __A : int = "image_segmenter"
    __A : Union[str, Any] = CLIPSegForImageSegmentation
    __A : str = ["image", "text"]
    __A : Union[str, Any] = ["image"]

    def __init__( self , *args , **kwargs ):
        # The tool renders a PIL image, so fail fast without the vision extra.
        requires_backends(self , ["vision"] )
        super().__init__(*args , **kwargs )

    def __UpperCamelCase ( self , image: "Image" , label: str ):
        """Encode step: tokenize the label and preprocess the image for CLIPSeg.

        (The original signature repeated one parameter name — a SyntaxError —
        while the body read `label` and `image`; those names are restored.
        `padding` was an undefined name; True is assumed — verify.)
        """
        return self.pre_processor(text=[label] , images=[image] , padding=True , return_tensors="pt" )

    def __UpperCamelCase ( self , inputs ):
        """Forward step: run CLIPSeg and return the raw logits."""
        with torch.no_grad():
            logits = self.model(**inputs ).logits
        return logits

    def __UpperCamelCase ( self , outputs ):
        """Decode step: binarize the logits and render them as a PIL mask."""
        array = outputs.cpu().detach().numpy()
        # NOTE(review): the original assigned bare locals here; thresholding
        # the array in place (negatives -> 0, positives -> 1) is assumed —
        # verify. `np.uinta` is not a numpy dtype; uint8 is used for an
        # 8-bit grayscale image.
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 2_55).astype(np.uint8 ) )
| 87 | from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def lowercase_ ( _lowerCamelCase : Dict[str, torch.Tensor]):
    """Build dense atom14 <-> atom37 lookup tables and per-residue gather maps.

    NOTE(review): this block appears damaged by obfuscation — many names it
    reads (`restype_atomaa_to_atomaa_list`, `atom_names`, `atom_name_to_idxaa`,
    `protein`, `restype_atomaa_to_atomaa`, `restype_atomaa_mask`,
    `protein_aatype`, `residx_atomaa_mask`, `residx_atomaa_to_atomaa`,
    `restype_name`) are never bound because the assignments above them bind
    throwaway `lowercase__` locals instead, and `torch.intaa`/`torch.floataa`
    are not real torch dtypes. The structure matches OpenFold's
    `make_atom14_masks`; restore the original bindings (including the writes
    back into the `protein` dict) before relying on this function.
    """
    lowercase__ : Any = []
    lowercase__ : Optional[int] = []
    lowercase__ : Tuple = []
    # Per-restype tables: atom14->atom37 indices, atom37->atom14 indices, masks.
    for rt in rc.restypes:
        lowercase__ : Dict = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
        restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        lowercase__ : str = {name: i for i, name in enumerate(_lowerCamelCase)}
        restype_atomaa_to_atomaa_list.append(
            [(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types])
        restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names])
    # Add dummy mapping for restype 'UNK'
    restype_atomaa_to_atomaa_list.append([0] * 14)
    restype_atomaa_to_atomaa_list.append([0] * 37)
    restype_atomaa_mask_list.append([0.0] * 14)
    lowercase__ : Union[str, Any] = torch.tensor(
        _lowerCamelCase , dtype=torch.intaa , device=protein["aatype"].device , )
    lowercase__ : str = torch.tensor(
        _lowerCamelCase , dtype=torch.intaa , device=protein["aatype"].device , )
    lowercase__ : List[str] = torch.tensor(
        _lowerCamelCase , dtype=torch.floataa , device=protein["aatype"].device , )
    lowercase__ : str = protein["aatype"].to(torch.long)
    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    lowercase__ : Dict = restype_atomaa_to_atomaa[protein_aatype]
    lowercase__ : str = restype_atomaa_mask[protein_aatype]
    lowercase__ : List[Any] = residx_atomaa_mask
    lowercase__ : Optional[Any] = residx_atomaa_to_atomaa.long()
    # create the gather indices for mapping back
    lowercase__ : str = restype_atomaa_to_atomaa[protein_aatype]
    lowercase__ : str = residx_atomaa_to_atomaa.long()
    # create the corresponding mask
    lowercase__ : Optional[Any] = torch.zeros([21, 37] , dtype=torch.floataa , device=protein["aatype"].device)
    for restype, restype_letter in enumerate(rc.restypes):
        lowercase__ : Tuple = rc.restype_atoa[restype_letter]
        lowercase__ : List[Any] = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            lowercase__ : Optional[int] = rc.atom_order[atom_name]
            lowercase__ : Tuple = 1
        lowercase__ : Dict = restype_atomaa_mask[protein_aatype]
        lowercase__ : Any = residx_atomaa_mask
    return protein
def lowercase_ ( _lowerCamelCase : Dict[str, torch.Tensor]):
    """Numpy front-end: tensorize a batch, run the atom14/atom37 mask builder,
    and convert the results back to numpy arrays.

    NOTE(review): the body reads `batch`, `out` and `make_atomaa_masks`, none
    of which are bound in this file (the assignments bind throwaway
    `lowercase__` locals and the sibling function above is also named
    `lowercase_`); restore the original names before relying on this function.
    """
    lowercase__ : Tuple = tree_map(lambda _lowerCamelCase: torch.tensor(_lowerCamelCase , device=batch["aatype"].device) , _lowerCamelCase , np.ndarray)
    lowercase__ : List[str] = tensor_tree_map(lambda _lowerCamelCase: np.array(_lowerCamelCase) , make_atomaa_masks(_lowerCamelCase))
    return out
| 87 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
UpperCamelCase = logging.get_logger(__name__)
def lowercase_(videos):
    """Coerce video input into the batched list-of-videos-of-frames format.

    Accepts a single frame, a list of frames (one video), or a list of videos;
    always returns `list[video][frame]`. Raises ValueError for anything else.
    (The original parameter name never matched the `videos` the body read.)
    """
    if isinstance(videos , (list, tuple)) and isinstance(videos[0] , (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos , (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f'''Could not make batched video from {videos}''')
class snake_case_ ( __A ):
    """Image processor for video models: per-frame resize, center-crop,
    rescale (with an optional ±0.5 offset) and normalization.

    NOTE(review): the original method definitions were obfuscated to one shared
    name while the call sites (`self.resize`, `self.center_crop`,
    `self.rescale`, `self.normalize`, `self._preprocess_image`) kept the real
    names, and every multi-parameter signature repeated one parameter name
    (a SyntaxError). Names/signatures are reconstructed from those call sites
    and the attribute reads in the bodies.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self ,
        do_resize: bool = True ,
        size: Dict[str, int] = None ,
        resample: PILImageResampling = PILImageResampling.BILINEAR ,
        do_center_crop: bool = True ,
        crop_size: Dict[str, int] = None ,
        do_rescale: bool = True ,
        rescale_factor: Union[int, float] = 1 / 2_55 ,
        offset: bool = True ,
        do_normalize: bool = True ,
        image_mean: Optional[Union[float, List[float]]] = None ,
        image_std: Optional[Union[float, List[float]]] = None ,
        **kwargs ,
    ) -> None:
        super().__init__(**kwargs )
        size = size if size is not None else {"shortest_edge": 2_56}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
        crop_size = get_size_dict(crop_size , param_name="crop_size" )
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize( self , image: np.ndarray , size: Dict[str, int] , resample: PILImageResampling = PILImageResampling.BILINEAR , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        """Resize one frame to `size` (shortest-edge or exact height/width)."""
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image , size["shortest_edge"] , default_to_square=False )
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(F'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )

    def center_crop( self , image: np.ndarray , size: Dict[str, int] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        """Center-crop one frame to exactly `size["height"]` x `size["width"]`."""
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' )
        return center_crop(image , size=(size["height"], size["width"]) , data_format=data_format , **kwargs )

    def rescale( self , image: np.ndarray , scale: Union[int, float] , offset: bool = True , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        """Rescale pixel values; with `offset`, shift before scaling.

        (`np.floataa` in the original is not a numpy dtype; float32 is used.)
        """
        image = image.astype(np.float32 )
        if offset:
            image = image - (scale / 2)
        return rescale(image , scale=scale , data_format=data_format , **kwargs )

    def normalize( self , image: np.ndarray , mean: Union[float, List[float]] , std: Union[float, List[float]] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        """Normalize one frame with the given per-channel mean and std."""
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )

    def _preprocess_image( self , image , do_resize: bool = None , size: Dict[str, int] = None , resample: PILImageResampling = None , do_center_crop: bool = None , crop_size: Dict[str, int] = None , do_rescale: bool = None , rescale_factor: float = None , offset: bool = None , do_normalize: bool = None , image_mean: Optional[Union[float, List[float]]] = None , image_std: Optional[Union[float, List[float]]] = None , data_format: Optional[ChannelDimension] = ChannelDimension.FIRST , ) -> np.ndarray:
        """Apply the configured transformations to a single frame."""
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True." )
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )
        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True." )
        # All transformations expect numpy arrays.
        image = to_numpy_array(image )
        if do_resize:
            image = self.resize(image=image , size=size , resample=resample )
        if do_center_crop:
            image = self.center_crop(image , size=crop_size )
        if do_rescale:
            image = self.rescale(image=image , scale=rescale_factor , offset=offset )
        if do_normalize:
            image = self.normalize(image=image , mean=image_mean , std=image_std )
        image = to_channel_dimension_format(image , data_format )
        return image

    def preprocess( self , videos , do_resize: bool = None , size: Dict[str, int] = None , resample: PILImageResampling = None , do_center_crop: bool = None , crop_size: Dict[str, int] = None , do_rescale: bool = None , rescale_factor: float = None , offset: bool = None , do_normalize: bool = None , image_mean: Optional[Union[float, List[float]]] = None , image_std: Optional[Union[float, List[float]]] = None , return_tensors: Optional[Union[str, TensorType]] = None , data_format: ChannelDimension = ChannelDimension.FIRST , **kwargs , ):
        """Preprocess one or more videos into a `BatchFeature` of pixel values."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name="crop_size" )
        if not valid_images(videos ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        # Coerce input into list[video][frame] form before per-frame processing.
        videos = make_batched(videos )
        videos = [
            [
                self._preprocess_image(
                    image=img , do_resize=do_resize , size=size , resample=resample , do_center_crop=do_center_crop , crop_size=crop_size , do_rescale=do_rescale , rescale_factor=rescale_factor , offset=offset , do_normalize=do_normalize , image_mean=image_mean , image_std=image_std , data_format=data_format , )
                for img in video
            ]
            for video in videos
        ]
        data = {"pixel_values": videos}
        return BatchFeature(data=data , tensor_type=return_tensors )
| 87 | import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class snake_case_ ( unittest.TestCase ):
    """Helper building a tiny BigBird config plus random inputs for the Flax tests.

    NOTE(review): the original __init__ repeated one parameter name ~25 times
    (a SyntaxError) and bound bare locals; parameter names and the `self.`
    assignments are reconstructed from the attribute reads in the methods
    below. The method names are grounded by the `self.prepare_config_and_inputs()`
    call inside this class.
    """

    def __init__(
        self ,
        parent ,
        batch_size=2 ,
        seq_length=56 ,
        is_training=True ,
        use_attention_mask=True ,
        use_token_type_ids=True ,
        use_labels=True ,
        vocab_size=99 ,
        hidden_size=32 ,
        num_hidden_layers=2 ,
        num_attention_heads=2 ,
        intermediate_size=7 ,
        hidden_act="gelu_new" ,
        hidden_dropout_prob=0.1 ,
        attention_probs_dropout_prob=0.1 ,
        max_position_embeddings=5_12 ,
        type_vocab_size=16 ,
        type_sequence_label_size=2 ,
        initializer_range=0.02 ,
        num_choices=4 ,
        attention_type="block_sparse" ,
        use_bias=True ,
        rescale_embeddings=False ,
        block_size=2 ,
        num_random_blocks=3 ,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks

    def prepare_config_and_inputs( self ):
        """Create random input ids, optional masks/token types and a config."""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        # NOTE(review): `is_decoder` was an undefined name in the original;
        # an encoder-only setup (False) is assumed — verify.
        config = BigBirdConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common( self ):
        """Adapt `prepare_config_and_inputs` output to the common-test format."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_flax
class snake_case_ ( __A ,unittest.TestCase ):
    """Flax BigBird model test suite.

    Several common tests are re-declared here only to mark them @slow, because
    BigBird is much slower than other models. Method/attribute names are
    reconstructed from their own `super().<name>()` calls and the
    `self.all_model_classes` / `self.test_attn_probs` / `self.model_tester`
    reads in the bodies (the original signatures repeated parameter names — a
    SyntaxError — and bound bare locals).
    """

    all_model_classes = (
        (
            FlaxBigBirdForCausalLM,
            FlaxBigBirdModel,
            FlaxBigBirdForPreTraining,
            FlaxBigBirdForMaskedLM,
            FlaxBigBirdForMultipleChoice,
            FlaxBigBirdForQuestionAnswering,
            FlaxBigBirdForSequenceClassification,
            FlaxBigBirdForTokenClassification,
        )
        if is_flax_available()
        else ()
    )
    test_attn_probs = False
    # NOTE(review): the second flag's original name is not recoverable from
    # this file; `test_mismatched_shapes` is assumed from the common suite.
    test_mismatched_shapes = False

    def setUp( self ):
        self.model_tester = FlaxBigBirdModelTester(self )

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_save_pretrained( self ):
        super().test_from_pretrained_save_pretrained()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_with_no_automatic_init( self ):
        super().test_from_pretrained_with_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_no_automatic_init( self ):
        super().test_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_hidden_states_output( self ):
        super().test_hidden_states_output()

    @slow
    def test_model_from_pretrained( self ):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/bigbird-roberta-base" )
            self.assertIsNotNone(model )

    def test_attention_outputs( self ):
        # BigBird's block-sparse attention does not return attention probs, so
        # only run the common attention tests when explicitly enabled.
        if self.test_attn_probs:
            super().test_attention_outputs()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_jit_compilation( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                model = model_class(config )

                @jax.jit
                def model_jitted(input_ids , attention_mask=None , **kwargs ):
                    return model(input_ids=input_ids , attention_mask=attention_mask , **kwargs )

                with self.subTest("JIT Enabled" ):
                    jitted_outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest("JIT Disabled" ):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
                # JIT must not change the number or shapes of the outputs.
                self.assertEqual(len(jitted_outputs ) , len(outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )

    def check_pt_flax_outputs( self , fx_outputs , pt_outputs , model_class , tol=1E-5 , name="outputs" , attributes=None ):
        # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version,
        # an effort was done to return `attention_probs` (yet to be verified).
        if name.startswith("outputs.attentions" ):
            return
        else:
            super().check_pt_flax_outputs(fx_outputs , pt_outputs , model_class , tol , name , attributes )
| 87 | 1 |
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
UpperCamelCase = namedtuple(
'''_TestCommandArgs''',
[
'''dataset''',
'''name''',
'''cache_dir''',
'''data_dir''',
'''all_configs''',
'''save_infos''',
'''ignore_verifications''',
'''force_redownload''',
'''clear_cache''',
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_apercent_close(source, target):
    """Return True when `source` is within 1% relative error of `target`.

    Args:
        source: measured value.
        target: reference value; must be non-zero (it is the denominator).
          NOTE(review): a negative `target` makes the check vacuously true —
          callers here only pass positive byte counts.

    The mangled original declared both parameters as ``_lowerCamelCase``
    (a SyntaxError) while the body referenced ``source``/``target``; the
    integration test below calls this helper as ``is_apercent_close``.
    """
    return (abs(source - target) / target) < 0.01
@pytest.mark.integration
def lowercase_(dataset_dir):
    """Run the `datasets-cli test --save_infos` command on `dataset_dir` and
    validate the dataset infos written into its README.

    Fixes vs. the mangled original: `all_configs`/`save_infos` were passed the
    directory instead of True, and the final `result == expected` comparison
    was discarded (missing `assert`).
    """
    args = _TestCommandArgs(dataset=dataset_dir, all_configs=True, save_infos=True)
    test_command = TestCommand(*args)
    test_command.run()
    # --save_infos must have produced a README with the infos embedded.
    dataset_readme_path = os.path.join(dataset_dir, "README.md")
    assert os.path.exists(dataset_readme_path)
    dataset_infos = DatasetInfosDict.from_directory(dataset_dir)
    expected_dataset_infos = DatasetInfosDict(
        {
            "default": DatasetInfo(
                features=Features(
                    {
                        "tokens": Sequence(Value("string")),
                        "ner_tags": Sequence(
                            ClassLabel(names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"])),
                        "langs": Sequence(Value("string")),
                        "spans": Sequence(Value("string")),
                    }) , splits=[
                    {
                        "name": "train",
                        "num_bytes": 235_1563,
                        "num_examples": 1_0000,
                    },
                    {
                        "name": "validation",
                        "num_bytes": 23_8418,
                        "num_examples": 1000,
                    },
                ] , download_size=394_0680 , dataset_size=258_9981 , )
        })
    assert dataset_infos.keys() == expected_dataset_infos.keys()
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos["default"], key), getattr(expected_dataset_infos["default"], key)
        if key == "num_bytes":
            # byte counts drift slightly between runs, so compare within 1%
            assert is_apercent_close(result, expected)
        elif key == "splits":
            assert list(result) == list(expected)
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_apercent_close(result[split].num_bytes, expected[split].num_bytes)
        else:
            assert result == expected
| 87 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
# Lazy-import structure for the GroupViT family: submodule name -> public
# names it exports. The torch/TF entries are only added when that backend is
# installed. The mangled original assigned everything to `UpperCamelCase`
# and then referenced the never-defined `_import_structure` (NameError) and
# never installed the lazy module into `sys.modules`.
_import_structure = {
    "configuration_groupvit": [
        "GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "GroupViTConfig",
        "GroupViTOnnxConfig",
        "GroupViTTextConfig",
        "GroupViTVisionConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_groupvit"] = [
        "GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GroupViTModel",
        "GroupViTPreTrainedModel",
        "GroupViTTextModel",
        "GroupViTVisionModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_groupvit"] = [
        "TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFGroupViTModel",
        "TFGroupViTPreTrainedModel",
        "TFGroupViTTextModel",
        "TFGroupViTVisionModel",
    ]

if TYPE_CHECKING:
    # Real imports only for type checkers; runtime uses the lazy module.
    from .configuration_groupvit import (
        GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        GroupViTConfig,
        GroupViTOnnxConfig,
        GroupViTTextConfig,
        GroupViTVisionConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_groupvit import (
            GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GroupViTModel,
            GroupViTPreTrainedModel,
            GroupViTTextModel,
            GroupViTVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_groupvit import (
            TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFGroupViTModel,
            TFGroupViTPreTrainedModel,
            TFGroupViTTextModel,
            TFGroupViTVisionModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 87 | 1 |
import os
import pytest
from transformers.dynamic_module_utils import get_imports
# Fixture snippets fed to `get_imports`; each exercises one import pattern the
# parser must resolve to exactly ["os"]. The mangled original bound every
# snippet to `UpperCamelCase` (each clobbering the previous) while the CASES
# list referenced the real, undefined names; the snippets' internal
# indentation — required for them to be parseable Python — is restored here.
TOP_LEVEL_IMPORT = """
import os
"""

IMPORT_IN_FUNCTION = """
def foo():
    import os
    return False
"""

DEEPLY_NESTED_IMPORT = """
def foo():
    def bar():
        if True:
            import os
        return False
    return bar()
"""

TOP_LEVEL_TRY_IMPORT = """
import os

try:
    import bar
except ImportError:
    raise ValueError()
"""

TRY_IMPORT_IN_FUNCTION = """
import os

def foo():
    try:
        import bar
    except ImportError:
        raise ValueError()
"""

MULTIPLE_EXCEPTS_IMPORT = """
import os

try:
    import bar
except (ImportError, AttributeError):
    raise ValueError()
"""

EXCEPT_AS_IMPORT = """
import os

try:
    import bar
except ImportError as e:
    raise ValueError()
"""

GENERIC_EXCEPT_IMPORT = """
import os

try:
    import bar
except:
    raise ValueError()
"""

MULTILINE_TRY_IMPORT = """
import os

try:
    import bar
    import baz
except ImportError:
    raise ValueError()
"""

MULTILINE_BOTH_IMPORT = """
import os

try:
    import bar
    import baz
except ImportError:
    x = 1
    raise ValueError()
"""

CASES = [
    TOP_LEVEL_IMPORT,
    IMPORT_IN_FUNCTION,
    DEEPLY_NESTED_IMPORT,
    TOP_LEVEL_TRY_IMPORT,
    GENERIC_EXCEPT_IMPORT,
    MULTILINE_TRY_IMPORT,
    MULTILINE_BOTH_IMPORT,
    MULTIPLE_EXCEPTS_IMPORT,
    EXCEPT_AS_IMPORT,
    TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize("case", CASES)
def lowercase_(tmp_path, case):
    """Write each fixture snippet to a temp module and check that
    `get_imports` reports exactly ["os"].

    The mangled original declared both parameters as ``_lowerCamelCase``
    (a SyntaxError) and parametrized over an undefined name.
    """
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)
    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
| 87 | import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_json_dataset(dataset, expected_features):
    """Assert `dataset` matches the canonical 4-row / 3-column JSON fixture.

    The tests below call this helper as ``_check_json_dataset``; the mangled
    original named it ``lowercase_`` with two identical parameter names
    (a SyntaxError) and asserted ``isinstance(x, x)``.
    """
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True])
def lowercase_ ( _lowerCamelCase : List[Any] , _lowerCamelCase : Any , _lowerCamelCase : str):
lowercase__ : Optional[int] = tmp_path / "cache"
lowercase__ : str = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowercase__ : Union[str, Any] = JsonDatasetReader(_lowerCamelCase , cache_dir=_lowerCamelCase , keep_in_memory=_lowerCamelCase).read()
_check_json_dataset(_lowerCamelCase , _lowerCamelCase)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def lowercase_(features, jsonl_path, tmp_path):
    """Explicit `features` (when given) override the inferred JSON schema."""
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_path, features=features, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_3": "float64", "col_1": "string", "col_2": "int64"},
    ],
)
def lowercase_(features, jsonl_312_path, tmp_path):
    """Column order from the file (col_3, col_1, col_2) is preserved."""
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
def lowercase_(jsonl_312_path, tmp_path):
    """Requested column order (col_2, col_3, col_1) wins over file order."""
    # jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    features = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
    expected_features = features.copy()
    # `features` is a literal dict here, so no None-guard is needed.
    features = Features({feature: Value(dtype) for feature, dtype in features.items()})
    cache_dir = tmp_path / "cache"
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("split" , [None, NamedSplit("train"), "train", "test"])
def lowercase_ ( _lowerCamelCase : Dict , _lowerCamelCase : Optional[int] , _lowerCamelCase : List[str]):
lowercase__ : Union[str, Any] = tmp_path / "cache"
lowercase__ : List[Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowercase__ : Union[str, Any] = JsonDatasetReader(_lowerCamelCase , cache_dir=_lowerCamelCase , split=_lowerCamelCase).read()
_check_json_dataset(_lowerCamelCase , _lowerCamelCase)
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list])
def lowercase_ ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : str , _lowerCamelCase : int):
if issubclass(_lowerCamelCase , _lowerCamelCase):
lowercase__ : Tuple = jsonl_path
elif issubclass(_lowerCamelCase , _lowerCamelCase):
lowercase__ : str = [jsonl_path]
lowercase__ : str = tmp_path / "cache"
lowercase__ : Optional[Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowercase__ : Tuple = JsonDatasetReader(_lowerCamelCase , cache_dir=_lowerCamelCase).read()
_check_json_dataset(_lowerCamelCase , _lowerCamelCase)
def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
    """Assert every requested split of `dataset_dict` matches the 4x3 JSON
    fixture. Call sites below use the name ``_check_json_datasetdict``; the
    mangled original had duplicate parameter names (SyntaxError) and an
    ``isinstance(x, x)`` assertion."""
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True])
def lowercase_ ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : str , _lowerCamelCase : str):
lowercase__ : List[str] = tmp_path / "cache"
lowercase__ : str = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowercase__ : Optional[Any] = JsonDatasetReader({"train": jsonl_path} , cache_dir=_lowerCamelCase , keep_in_memory=_lowerCamelCase).read()
_check_json_datasetdict(_lowerCamelCase , _lowerCamelCase)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def lowercase_(features, jsonl_path, tmp_path):
    """Explicit `features` override the inferred schema for DatasetDict reads."""
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader({"train": jsonl_path}, features=features, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split" , [None, NamedSplit("train"), "train", "test"])
def lowercase_ ( _lowerCamelCase : str , _lowerCamelCase : Dict , _lowerCamelCase : Tuple):
if split:
lowercase__ : Tuple = {split: jsonl_path}
else:
lowercase__ : Tuple = "train"
lowercase__ : int = {"train": jsonl_path, "test": jsonl_path}
lowercase__ : Dict = tmp_path / "cache"
lowercase__ : Union[str, Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowercase__ : Union[str, Any] = JsonDatasetReader(_lowerCamelCase , cache_dir=_lowerCamelCase).read()
_check_json_datasetdict(_lowerCamelCase , _lowerCamelCase , splits=list(path.keys()))
assert all(dataset[split].split == split for split in path.keys())
def load_json(buffer):
    """Deserialize one whole JSON document from a file-like `buffer`.

    The writer-test parametrization below references ``load_json`` by name;
    the mangled original named this function ``lowercase_``.
    """
    return json.load(buffer)
def load_json_lines(buffer):
    """Deserialize a JSON-lines stream: one JSON document per line of `buffer`.

    Fixes the mangled original, which called ``json.loads`` on the buffer
    object for every line instead of on each ``line`` (a TypeError at best,
    wrong data at worst); also restores the name the parametrization uses.
    """
    return [json.loads(line) for line in buffer]
class snake_case_ :
    """Tests for `JsonDatasetWriter`: round-trips a 10-row dataset through
    JSON / JSON-lines output, pandas `orient` variants, multiprocessing, and
    compression.

    Fixes vs. the mangled original: every method declared several parameters
    all named ``lowercase_`` (SyntaxErrors); parameter names are restored from
    the call patterns in each body. Method names are restored to distinct
    upstream-style names so they no longer shadow one another.
    """

    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines(self, lines, load_json_function, dataset):
        """Write as JSON(-lines), read back, and check the 10 records survive."""
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient(self, orient, container, keys, len_at, dataset):
        """Each pandas `orient` value produces the expected container shape."""
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines_multiproc(self, lines, load_json_function, dataset):
        """Same round-trip as above, but written with num_proc=2."""
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient_multiproc(self, orient, container, keys, len_at, dataset):
        """`orient` variants must also hold when writing with num_proc=2."""
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    def test_dataset_to_json_invalidproc(self, dataset):
        """num_proc=0 must be rejected."""
        # NOTE(review): exception type restored as ValueError per upstream — confirm.
        with pytest.raises(ValueError):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(dataset, buffer, num_proc=0)

    @pytest.mark.parametrize("compression, extension", [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")])
    def test_dataset_to_json_compression(self, shared_datadir, tmp_path_factory, extension, compression, dataset):
        """Compressed output decompresses to the same bytes as the reference fixture."""
        path = tmp_path_factory.mktemp("data") / f"test.json.{extension}"
        original_path = str(shared_datadir / f"test_file.json.{extension}")
        JsonDatasetWriter(dataset, path, compression=compression).write()
        with fsspec.open(path, "rb", compression="infer") as f:
            exported_content = f.read()
        with fsspec.open(original_path, "rb", compression="infer") as f:
            original_content = f.read()
        assert exported_content == original_content
| 87 | 1 |
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger: the config class below calls `logger.info`/`logger.warning`,
# so this binding must be named `logger` (the mangled original bound it to
# `UpperCamelCase`, which the archive map then clobbered).
logger = logging.get_logger(__name__)

# TODO Update this
# Map of canonical checkpoint name -> hosted config URL.
UpperCamelCase = {
    '''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''',
    # See all ESM models at https://huggingface.co/models?filter=esm
}
class snake_case_ ( __A ):
    """Configuration for ESM models (`model_type = "esm"`), optionally
    carrying a nested ESMFold configuration for folding checkpoints.

    Fixes vs. the mangled original: all 19 ``__init__`` parameters were named
    ``lowercase_`` (a SyntaxError) while the body referenced the real names,
    and the attribute assignments went to throwaway locals instead of
    ``self``. Names are restored from those body references.
    """

    __A : Tuple = "esm"

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            # Folding checkpoints need an ESMFold sub-config and a vocab list;
            # accept a ready instance, a plain dict, or fall back to defaults.
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                vocab_list = get_default_vocab_list()
            self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")

    def to_dict(self):
        """Serialize to a dict, expanding the nested ESMFold config."""
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    """Configuration of the ESMFold structure-prediction head.

    The class is renamed from the mangled ``snake_case_`` because the ESM
    config above instantiates ``EsmFoldConfig`` by name; field names are
    restored because every field was named ``__A`` (collapsing the dataclass
    to a single field) — the methods below read ``self.trunk`` and the config
    class reads ``use_esm_attn_map``.
    """

    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0

    embed_aa: bool = True
    bypass_lm: bool = False

    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        # Accept a ready TrunkConfig, a plain dict, or nothing (defaults).
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        """Serialize, expanding the nested trunk config."""
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    """Configuration of the ESMFold folding trunk.

    Renamed from the mangled ``snake_case_`` (EsmFoldConfig instantiates
    ``TrunkConfig`` by name) with distinct field names restored. Also fixes
    the validation: the original compared each state dim against *itself*
    (``x % x`` is always 0), making those checks dead code.
    """

    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        # Fixed: compare state dims against the head widths, not themselves.
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                f" {self.sequence_state_dim} and {self.sequence_head_width}.")
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                f" {self.pairwise_state_dim} and {self.pairwise_head_width}.")

        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.")
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.")
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")
        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")

    def to_dict(self):
        """Serialize, expanding the nested structure-module config."""
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    """Configuration of the ESMFold structure module (invariant point attention).

    Renamed from the mangled ``snake_case_`` because TrunkConfig instantiates
    ``StructureModuleConfig`` by name; distinct field names restored (every
    field was named ``__A``, collapsing the dataclass to a single field).
    """

    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        """Serialize every field to a plain dict."""
        return asdict(self)
def get_default_vocab_list():
    """Return the default 33-token ESM-2 vocabulary as a tuple.

    Layout: 4 leading special tokens, 25 residue letters (standard amino
    acids plus nonstandard/ambiguity codes), then ".", "-", "<null_1>" and
    "<mask>". Renamed from the mangled ``lowercase_`` because the ESM config
    above calls ``get_default_vocab_list()`` by name.
    """
    special_prefix = ("<cls>", "<pad>", "<eos>", "<unk>")
    residues = tuple("LAGVSERTIDPKQNFYMHWCXBUZO")
    special_suffix = (".", "-", "<null_1>", "<mask>")
    return special_prefix + residues + special_suffix
| 87 | import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class snake_case_ ( __A ):
    """Processor wrapping a LayoutLMv3 image processor and tokenizer into a
    single callable.

    Fixes vs. the mangled original: ``__init__`` and ``__call__`` declared
    many parameters all named ``lowercase_`` (SyntaxErrors); the ProcessorMixin
    wiring attributes and the method names the body itself calls
    (``self.get_overflowing_images``) are restored.
    """

    # ProcessorMixin contract: these exact attribute names are required.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        # Fall back to the deprecated feature_extractor if given.
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """Run the image processor (optionally with OCR) then the tokenizer,
        returning one `BatchEncoding` with pixel values attached."""
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.")
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True.")
        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]
        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["boxes"] , word_labels=word_labels , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = images
        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}")

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        """Forward everything to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward everything to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Names of the tensors this processor produces.
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 87 | 1 |
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def lowercase_(_lowerCamelCase: int):
    """Seed every RNG used by diffusers for reproducible runs.

    Seeds Python's `random`, NumPy, torch's CPU generator and all CUDA
    device generators with the same value.
    """
    seed = _lowerCamelCase
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    # Safe to call even when CUDA is unavailable (no-op in that case).
    torch.cuda.manual_seed_all(seed)
class snake_case_ :
    """Exponential Moving Average (EMA) of a set of parameters.

    Keeps `shadow_params`, a decayed running average of the tracked parameters,
    with optional warmup of the decay factor. The degraded signatures reused one
    parameter name (a SyntaxError) and assigned attributes to throwaway locals;
    the bodies themselves reference the real names (`self.get_decay`,
    `self.state_dict`, `self.copy_to`, ...), which this restores.
    """

    def __init__(
        self,
        parameters: Iterable[torch.nn.Parameter],
        decay: float = 0.9999,
        min_decay: float = 0.0,
        update_after_step: int = 0,
        use_ema_warmup: bool = False,
        inv_gamma: Union[float, int] = 1.0,
        power: Union[float, int] = 2 / 3,
        model_cls: Optional[Any] = None,
        model_config: Dict[str, Any] = None,
        **kwargs,
    ):
        """
        Args:
            parameters: parameters to track (an iterable of tensors).
            decay: maximum EMA decay factor.
            min_decay: minimum EMA decay factor.
            update_after_step: number of steps before EMA updates begin.
            use_ema_warmup: ramp the decay with `1 - (1 + step/inv_gamma) ** -power`.
            inv_gamma, power: warmup schedule hyper-parameters.
            model_cls, model_config: needed only for `save_pretrained`.
        """
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()
            # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
            use_ema_warmup = True

        if kwargs.get("max_value", None) is not None:
            deprecation_message = "The `max_value` argument is deprecated. Please use `decay` instead."
            deprecate("max_value", "1.0.0", deprecation_message, standard_warn=False)
            decay = kwargs["max_value"]

        if kwargs.get("min_value", None) is not None:
            deprecation_message = "The `min_value` argument is deprecated. Please use `min_decay` instead."
            deprecate("min_value", "1.0.0", deprecation_message, standard_warn=False)
            min_decay = kwargs["min_value"]

        parameters = list(parameters)
        # Detached copies form the moving average.
        self.shadow_params = [p.clone().detach() for p in parameters]

        if kwargs.get("device", None) is not None:
            deprecation_message = "The `device` argument is deprecated. Please use `to` instead."
            deprecate("device", "1.0.0", deprecation_message, standard_warn=False)
            self.to(device=kwargs["device"])

        self.temp_stored_params = None

        self.decay = decay
        self.min_decay = min_decay
        self.update_after_step = update_after_step
        self.use_ema_warmup = use_ema_warmup
        self.inv_gamma = inv_gamma
        self.power = power
        self.optimization_step = 0
        self.cur_decay_value = None  # set in `step()`

        self.model_cls = model_cls
        self.model_config = model_config

    @classmethod
    def from_pretrained(cls, path, model_cls) -> "EMAModel":
        """Load EMA state saved with `save_pretrained` back into a fresh EMA tracker."""
        _, ema_kwargs = model_cls.load_config(path, return_unused_kwargs=True)
        model = model_cls.from_pretrained(path)

        ema_model = cls(model.parameters(), model_cls=model_cls, model_config=model.config)

        ema_model.load_state_dict(ema_kwargs)
        return ema_model

    def save_pretrained(self, path):
        """Save the EMA weights (via `copy_to` into a fresh model) plus the EMA hyper-parameters."""
        if self.model_cls is None:
            raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__.")

        if self.model_config is None:
            raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__.")

        model = self.model_cls.from_config(self.model_config)
        state_dict = self.state_dict()
        # shadow_params are stored as the model weights themselves, not config entries
        state_dict.pop("shadow_params", None)

        model.register_to_config(**state_dict)
        self.copy_to(model.parameters())
        model.save_pretrained(path)

    def get_decay(self, optimization_step: int) -> float:
        """Compute the decay factor for the exponential moving average at a given step."""
        step = max(0, optimization_step - self.update_after_step - 1)

        if step <= 0:
            return 0.0

        if self.use_ema_warmup:
            cur_decay_value = 1 - (1 + step / self.inv_gamma) ** -self.power
        else:
            cur_decay_value = (1 + step) / (10 + step)

        cur_decay_value = min(cur_decay_value, self.decay)
        # make sure decay is not smaller than min_decay
        cur_decay_value = max(cur_decay_value, self.min_decay)
        return cur_decay_value

    @torch.no_grad()
    def step(self, parameters: Iterable[torch.nn.Parameter]):
        """Update the shadow parameters towards `parameters` by the current decay."""
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage.step`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()

        parameters = list(parameters)

        self.optimization_step += 1

        # Compute the decay factor for the exponential moving average.
        decay = self.get_decay(self.optimization_step)
        self.cur_decay_value = decay
        one_minus_decay = 1 - decay

        context_manager = contextlib.nullcontext
        if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
            import deepspeed

        for s_param, param in zip(self.shadow_params, parameters):
            # Under DeepSpeed ZeRO-3 the full parameter must be gathered before reading it.
            if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
                context_manager = deepspeed.zero.GatheredParameters(param, modifier_rank=None)

            with context_manager():
                if param.requires_grad:
                    s_param.sub_(one_minus_decay * (s_param - param))
                else:
                    s_param.copy_(param)

    def copy_to(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        """Copy the shadow (averaged) parameters into `parameters` in place."""
        parameters = list(parameters)
        for s_param, param in zip(self.shadow_params, parameters):
            param.data.copy_(s_param.to(param.device).data)

    def to(self, device=None, dtype=None) -> None:
        """Move the shadow parameters to `device`/`dtype` (dtype only for floating-point tensors)."""
        self.shadow_params = [
            p.to(device=device, dtype=dtype) if p.is_floating_point() else p.to(device=device)
            for p in self.shadow_params
        ]

    def state_dict(self) -> dict:
        """Return the EMA hyper-parameters and shadow parameters as a plain dict."""
        return {
            "decay": self.decay,
            "min_decay": self.min_decay,
            "optimization_step": self.optimization_step,
            "update_after_step": self.update_after_step,
            "use_ema_warmup": self.use_ema_warmup,
            "inv_gamma": self.inv_gamma,
            "power": self.power,
            "shadow_params": self.shadow_params,
        }

    def store(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        """Temporarily stash `parameters` (on CPU) so they can be put back with `restore`."""
        self.temp_stored_params = [param.detach().cpu().clone() for param in parameters]

    def restore(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        """Copy the weights saved by `store` back into `parameters` and drop the stash."""
        if self.temp_stored_params is None:
            raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights " "to `restore()`")
        for c_param, param in zip(self.temp_stored_params, parameters):
            param.data.copy_(c_param.data)
        # Better memory-wise.
        self.temp_stored_params = None

    def load_state_dict(self, state_dict: dict) -> None:
        """Load EMA state produced by `state_dict`, validating each field."""
        state_dict = copy.deepcopy(state_dict)

        self.decay = state_dict.get("decay", self.decay)
        if self.decay < 0.0 or self.decay > 1.0:
            raise ValueError("Decay must be between 0 and 1")

        self.min_decay = state_dict.get("min_decay", self.min_decay)
        if not isinstance(self.min_decay, float):
            raise ValueError("Invalid min_decay")

        self.optimization_step = state_dict.get("optimization_step", self.optimization_step)
        if not isinstance(self.optimization_step, int):
            raise ValueError("Invalid optimization_step")

        self.update_after_step = state_dict.get("update_after_step", self.update_after_step)
        if not isinstance(self.update_after_step, int):
            raise ValueError("Invalid update_after_step")

        self.use_ema_warmup = state_dict.get("use_ema_warmup", self.use_ema_warmup)
        if not isinstance(self.use_ema_warmup, bool):
            raise ValueError("Invalid use_ema_warmup")

        self.inv_gamma = state_dict.get("inv_gamma", self.inv_gamma)
        if not isinstance(self.inv_gamma, (float, int)):
            raise ValueError("Invalid inv_gamma")

        self.power = state_dict.get("power", self.power)
        if not isinstance(self.power, (float, int)):
            raise ValueError("Invalid power")

        shadow_params = state_dict.get("shadow_params", None)
        if shadow_params is not None:
            self.shadow_params = shadow_params
            if not isinstance(self.shadow_params, list):
                raise ValueError("shadow_params must be a list")
            if not all(isinstance(p, torch.Tensor) for p in self.shadow_params):
                raise ValueError("shadow_params must all be Tensors")
| 87 | from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
# Module-level logger for this image-processor module.
UpperCamelCase = logging.get_logger(__name__)
# PIL is an optional dependency; only import it when vision support is installed.
if is_vision_available():
    import PIL
class snake_case_ ( BaseImageProcessor ):
    """CLIP-style image processor.

    Pipeline: optional RGB conversion -> shortest-edge resize -> center crop ->
    rescale -> normalize -> channel-dimension formatting, returning a
    `BatchFeature` with `pixel_values`. The degraded version reused one
    parameter name per signature (a SyntaxError), assigned attributes to
    throwaway locals, and collapsed every method onto one name even though
    `preprocess` calls `self.resize`/`self.center_crop`/`self.rescale`/
    `self.normalize`; this restores those names. Base class restored to the
    imported `BaseImageProcessor` (`super().__init__(**kwargs)` requires it).
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        # CLIP defaults: 224px shortest-edge resize and a 224x224 center crop.
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize so the shortest edge equals `size["shortest_edge"]`, preserving aspect ratio."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        # `resize` here is the module-level transform, not this method.
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop to `size["height"] x size["width"]`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Multiply pixel values by `scale` (e.g. 1/255 to map uint8 into [0, 1])."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize with per-channel `mean` and `std`."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Preprocess an image or batch of images; each argument overrides the instance default."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 87 | 1 |
# Doomsday ("anchor day") of each month, leap vs. non-leap years.
# e.g. in a leap year Jan 4 falls on the year's doomsday; in a common year Jan 3.
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: "Sunday",
    1: "Monday",
    2: "Tuesday",
    3: "Wednesday",
    4: "Thursday",
    5: "Friday",
    6: "Saturday",
}


def lowercase_(year: int, month: int, day: int) -> str:
    """Return the week-day name of a Gregorian date using Conway's Doomsday algorithm.

    >>> lowercase_(2020, 10, 24)
    'Saturday'
    >>> lowercase_(2017, 10, 24)
    'Tuesday'
    >>> lowercase_(1970, 9, 16)
    'Wednesday'
    """
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = ((centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor) % 7
    # Leap year: divisible by 4, except century years not divisible by 400.
    # (The degraded version had `(year % 400) == 0` here, which wrongly treated
    # years like 2000 as common years.)
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
    # Run the doctest examples embedded in this module's docstrings.
    import doctest
    doctest.testmod()
| 87 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
# Lazy-import machinery: the final _LazyModule call (kept from the original)
# consumes `_import_structure`, which the degraded version never defined.
_import_structure = {}
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # sentencepiece is available, so the slow tokenizer can be exposed.
    _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]
if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
    import sys

    # Replace this module object with a lazy proxy so the heavy imports above
    # only happen on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 87 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import table consumed by the _LazyModule call below; the degraded
# version bound it to a throwaway name while still referencing
# `_import_structure` in the final line.
_import_structure = {
    "configuration_luke": ["LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP", "LukeConfig"],
    "tokenization_luke": ["LukeTokenizer"],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # torch is available: expose the modeling classes lazily.
    _import_structure["modeling_luke"] = [
        "LUKE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LukeForEntityClassification",
        "LukeForEntityPairClassification",
        "LukeForEntitySpanClassification",
        "LukeForMultipleChoice",
        "LukeForQuestionAnswering",
        "LukeForSequenceClassification",
        "LukeForTokenClassification",
        "LukeForMaskedLM",
        "LukeModel",
        "LukePreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
    from .tokenization_luke import LukeTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_luke import (
            LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
            LukeForEntityClassification,
            LukeForEntityPairClassification,
            LukeForEntitySpanClassification,
            LukeForMaskedLM,
            LukeForMultipleChoice,
            LukeForQuestionAnswering,
            LukeForSequenceClassification,
            LukeForTokenClassification,
            LukeModel,
            LukePreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy; heavy imports run on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# Digit pools by parity; the recursion below selects from the opposite-parity
# pool so that each pairwise digit sum is odd (Project Euler 145).
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(remaining_length: int, remainder: int, digits: list[int], length: int) -> int:
    """Count reversible numbers of `length` digits, filling `digits` from the outside in.

    A number n is reversible when every digit of n + reverse(n) is odd.
    `remaining_length` is how many positions are still unassigned and
    `remainder` is the running carry of the pairwise digit sums.

    >>> reversible_numbers(1, 0, [0], 1)
    0
    >>> reversible_numbers(2, 0, [0] * 2, 2)
    20
    >>> reversible_numbers(3, 0, [0] * 3, 3)
    100
    """
    if remaining_length == 0:
        # Leading/trailing zero would make the reversal shorter -> not reversible.
        if digits[0] == 0 or digits[-1] == 0:
            return 0

        # Verify every remaining pairwise digit sum (plus carry) is odd.
        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10

        return 1

    if remaining_length == 1:
        # Odd length: the middle digit is doubled, so the carry must already be odd.
        if remainder % 2 == 0:
            return 0

        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(0, (remainder + 2 * digit) // 10, digits, length)
        return result

    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1

        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS

        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2,
                (remainder + digit1 + digit2) // 10,
                digits,
                length,
            )
    return result


def solution(max_power: int = 9) -> int:
    """Count reversible numbers below 10**max_power.

    >>> solution(3)
    120
    """
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result
if __name__ == "__main__":
    # Project Euler 145: count reversible numbers below 10**9.
    print(f"{solution() = }")
| 87 | 1 |
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class snake_case_ ( Trainer ):
    """Trainer for extractive question answering.

    Disables metric computation inside the generic evaluation loop, then
    post-processes raw start/end logits into answer predictions before scoring.
    The degraded version reused parameter names (a SyntaxError), assigned
    attributes to throwaway locals, and collapsed `evaluate`/`predict` onto one
    method name; this restores the `Trainer` overrides (base class restored to
    the imported `Trainer`).
    """

    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        """
        Args:
            eval_examples: un-tokenized examples aligned with `eval_dataset`.
            post_process_function: maps (examples, features, raw predictions)
                to final answer predictions.
        """
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                # No point gathering the predictions if there are no metrics.
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            # Always restore the user's metric function, even if the loop raised.
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
| 87 | import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
UpperCamelCase = '''\
@inproceedings{snover-etal-2006-study,
title = "A Study of Translation Edit Rate with Targeted Human Annotation",
author = "Snover, Matthew and
Dorr, Bonnie and
Schwartz, Rich and
Micciulla, Linnea and
Makhoul, John",
booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",
month = aug # " 8-12",
year = "2006",
address = "Cambridge, Massachusetts, USA",
publisher = "Association for Machine Translation in the Americas",
url = "https://aclanthology.org/2006.amta-papers.25",
pages = "223--231",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
UpperCamelCase = '''\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
'''
UpperCamelCase = '''
Produces TER scores alongside the number of edits and reference length.
Args:
predictions (list of str): The system stream (a sequence of segments).
references (list of list of str): A list of one or more reference streams (each a sequence of segments).
normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
Only applies if `normalized = True`. Defaults to `False`.
case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
\'score\' (float): TER score (num_edits / sum_ref_lengths * 100)
\'num_edits\' (int): The cumulative number of edits
\'ref_length\' (float): The cumulative average reference length
Examples:
Example 1:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}
Example 2:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}
Example 3:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... normalized=True,
... case_sensitive=True)
>>> print(results)
{\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}
Example 4:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}
Example 5:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class snake_case_ ( datasets.Metric ):
    """TER (Translation Edit Rate) metric backed by sacrebleu's `TER` implementation.

    The degraded version reused one parameter name per signature (a SyntaxError)
    and collapsed both methods onto one name; `datasets.Metric` requires the
    `_info`/`_compute` hooks restored here, and the body's own `sb_ter` usage
    pins the local names.
    """

    def _info(self):
        # sacrebleu's TER implementation only exists from 1.4.12 onwards.
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="http://www.cs.umd.edu/~snover/tercom/",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"],
            reference_urls=[
                "https://github.com/jhclark/tercom",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        normalized: bool = False,
        ignore_punct: bool = False,
        support_zh_ja_chars: bool = False,
        case_sensitive: bool = False,
    ):
        """Score `predictions` against `references`; every prediction must have the same number of references."""
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        # Transpose: sacrebleu expects one list per reference "stream", not per prediction.
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)
        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 87 | 1 |
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def lowercase_(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    """Find a real root of ``func`` (an expression in ``x``) via Newton-Raphson.

    ``func`` is evaluated with ``eval`` and differentiated symbolically with
    ``sympy.diff``; ``a`` is the starting guess and ``precision`` the stopping
    tolerance on ``|func(x)|``.

    NOTE(review): the original signature reused one obfuscated name for all
    three parameters (a SyntaxError); names here are reconstructed from the
    body and the demo calls below.
    """
    x = a  # the eval'd expressions reference the local name ``x``
    while True:
        # One Newton step: x <- x - f(x) / f'(x), carried out in Decimal.
        x = Decimal(x) - (
            Decimal(eval(func)) / Decimal(eval(str(diff(func))))  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)
# Let's Execute
if __name__ == "__main__":
    # NOTE(review): these demos call `newton_raphson`, but the function above
    # is defined as `lowercase_`; as written every line below raises NameError.
    # Find root of trigonometric function
    # Find value of pi
    print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
    # Find root of polynomial
    print(f"The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}")
    # Find Square Root of 5
    print(f"The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}")
    # Exponential Roots
    print(f"The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}")
def lowercase_(_lowerCamelCase: int) -> bool:
    """Return True if the given integer is a perfect cube (n == k**3, k integer).

    Negative perfect cubes (e.g. -27) are recognised as well.
    """
    # Round the float cube root and verify exactly in integer arithmetic: the
    # original `val * val * val == n` float comparison fails for values such
    # as 27, where 27 ** (1 / 3) == 3.0000000000000004.
    magnitude = abs(_lowerCamelCase)
    root = round(magnitude ** (1 / 3))
    return root * root * root == magnitude


if __name__ == "__main__":
    # NOTE(review): originally called the undefined name `perfect_cube`.
    print(lowercase_(27))  # True
    print(lowercase_(4))  # False
| 87 | 1 |
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
UpperCamelCase = [
'''Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the'''
''' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe'''
''' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.''',
'''The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal'''
''' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s'''
''' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the'''
''' body.''',
'''Amnesty International releases its annual report on the death penalty. The report catalogs the use of'''
''' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the'''
''' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital'''
''' punishment.''',
]
UpperCamelCase = [
'''Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'''
''' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz'''
''' had informed his Lufthansa training school of an episode of severe depression, airline says .''',
'''Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .'''
''' Israel and the United States opposed the move, which could open the door to war crimes investigations against'''
''' Israelis .''',
'''Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to'''
''' death . Organization claims that governments around the world are using the threat of terrorism to advance'''
''' executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death'''
''' sentences up by 28% .''',
]
def lowercase_ ( ):
    """Check aggregated vs. non-aggregated ROUGE results for the fixtures above."""
    # NOTE(review): results are bound to `lowercase__` but read back below as
    # `no_aggregation` / `no_aggregation_just_ra`, and every call passes the
    # undefined module-level name `_lowerCamelCase` — this test cannot run as
    # written; confirm the intended argument and result names.
    lowercase__ : Union[str, Any] = calculate_rouge(_lowerCamelCase , _lowerCamelCase , bootstrap_aggregation=_lowerCamelCase , rouge_keys=["rouge2", "rougeL"])
    assert isinstance(_lowerCamelCase , _lowerCamelCase)
    lowercase__ : Union[str, Any] = calculate_rouge(_lowerCamelCase , _lowerCamelCase , bootstrap_aggregation=_lowerCamelCase , rouge_keys=["rouge2"])
    assert (
        pd.DataFrame(no_aggregation["rouge2"]).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_ra["rouge2"]).fmeasure.mean()
    )


def lowercase_ ( ):
    """rougeLsum with newline separation should score higher on these fixtures."""
    # NOTE(review): `k`, `score` and `score_no_sep` are never bound (results go
    # to `lowercase__`), so the comparison below raises NameError as written.
    lowercase__ : List[str] = "rougeLsum"
    lowercase__ : int = calculate_rouge(_lowerCamelCase , _lowerCamelCase , newline_sep=_lowerCamelCase , rouge_keys=[k])[k]
    lowercase__ : str = calculate_rouge(_lowerCamelCase , _lowerCamelCase , newline_sep=_lowerCamelCase , rouge_keys=[k])[k]
    assert score > score_no_sep


def lowercase_ ( ):
    """newline_sep should not change rouge1 / rouge2 / rougeL scores."""
    # NOTE(review): same unbound-name pattern — `score_sep` / `score_no_sep`
    # are never assigned.
    lowercase__ : Union[str, Any] = ["rouge1", "rouge2", "rougeL"]
    lowercase__ : Tuple = calculate_rouge(_lowerCamelCase , _lowerCamelCase , newline_sep=_lowerCamelCase , rouge_keys=_lowerCamelCase)
    lowercase__ : Dict = calculate_rouge(_lowerCamelCase , _lowerCamelCase , newline_sep=_lowerCamelCase , rouge_keys=_lowerCamelCase)
    assert score_sep == score_no_sep


def lowercase_ ( ):
    """Single-sentence inputs: newline separation must not affect the score."""
    lowercase__ : Optional[Any] = [
        "Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.",
        "Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .",
    ]
    lowercase__ : List[Any] = [
        "Margot Frank, died in 1945, a month earlier than previously thought.",
        "Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"
        " the final seconds on board Flight 9525.",
    ]
    # NOTE(review): the fixtures above are bound to `lowercase__` (second
    # binding shadows the first) and the calls pass `_lowerCamelCase` — the
    # original argument names need restoring.
    assert calculate_rouge(_lowerCamelCase , _lowerCamelCase , newline_sep=_lowerCamelCase) == calculate_rouge(_lowerCamelCase , _lowerCamelCase , newline_sep=_lowerCamelCase)


def lowercase_ ( ):
    """rougeLsum on `<n>`-separated text: newline_sep=False should score higher here."""
    lowercase__ : Dict = [
        "\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" "
    ]
    lowercase__ : str = [
        " Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."
    ]
    # NOTE(review): `new_score` / `prev_score` are never bound — NameError as written.
    lowercase__ : str = calculate_rouge(_lowerCamelCase , _lowerCamelCase , rouge_keys=["rougeLsum"] , newline_sep=_lowerCamelCase)["rougeLsum"]
    lowercase__ : Optional[int] = calculate_rouge(_lowerCamelCase , _lowerCamelCase , rouge_keys=["rougeLsum"])["rougeLsum"]
    assert new_score > prev_score


def lowercase_ ( ):
    """calculate_rouge_path should work with and without bootstrap aggregation."""
    # NOTE(review): `data_dir` is never bound (result goes to `lowercase__`),
    # so the joinpath calls below raise NameError as written.
    lowercase__ : int = Path("examples/seq2seq/test_data/wmt_en_ro")
    lowercase__ : Tuple = calculate_rouge_path(data_dir.joinpath("test.source") , data_dir.joinpath("test.target"))
    assert isinstance(_lowerCamelCase , _lowerCamelCase)
    lowercase__ : str = calculate_rouge_path(
        data_dir.joinpath("test.source") , data_dir.joinpath("test.target") , bootstrap_aggregation=_lowerCamelCase)
    assert isinstance(_lowerCamelCase , _lowerCamelCase)
| 87 | from PIL import Image
def lowercase_(img: "Image", level: int):
    """Return a copy of ``img`` with its contrast adjusted by ``level``.

    ``level`` ranges over roughly -255..255; 0 leaves the image unchanged.

    NOTE(review): the original signature reused one obfuscated name for both
    parameters (a SyntaxError) and bound the factor to a dead local; names are
    reconstructed from the body.
    """
    # Standard contrast-correction factor; level 0 yields factor 1.0.
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        """Map a single channel value around the 128 midpoint."""
        return int(128 + factor * (c - 128))

    return img.point(contrast)
if __name__ == "__main__":
    # Load image
    with Image.open('''image_data/lena.jpg''') as img:
        # Change contrast to 170
        # NOTE(review): `change_contrast` is not defined in this module (the
        # function above is `lowercase_`), and the result is bound to
        # `UpperCamelCase` while the save call reads `cont_img` — this demo
        # raises NameError as written.
        UpperCamelCase = change_contrast(img, 170)
        cont_img.save('''image_data/lena_high_contrast.png''', format='''png''')
| 87 | 1 |
def lowercase_(_lowerCamelCase: dict) -> bool:
    """Return True if the directed graph (adjacency dict) contains a cycle."""
    visited: set[int] = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk: set[int] = set()
    return any(
        node not in visited and depth_first_search(_lowerCamelCase, node, visited, rec_stk)
        for node in _lowerCamelCase)


def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    """DFS from ``vertex``; True if a back edge (i.e. a cycle) is reachable.

    NOTE(review): originally obfuscated into a second `lowercase_` definition
    with four identically named parameters (a SyntaxError); the name and
    signature are reconstructed from the call site above.
    """
    visited.add(vertex)
    rec_stk.add(vertex)
    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            # Edge back into the current recursion stack: cycle found.
            return True
    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 87 | from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
UpperCamelCase = TypeVar('''T''')
class snake_case_:
    """Least-recently-used cache of hashable keys with a fixed capacity.

    NOTE(review): the original base was ``Generic[T]``, but the module binds
    its TypeVar to ``UpperCamelCase`` so ``T`` is undefined at class-creation
    time; the generic base is dropped here to keep the class importable. The
    original also bound the store/capacity to locals instead of ``self`` and
    gave both public methods the same obfuscated name — method names are
    reconstructed (``refer`` / ``display``) from the demo block below.
    """

    # Default maximum capacity of the cache.
    _MAX_CAPACITY: int = 10

    def __init__(self, n: int) -> None:
        """Create a cache holding at most ``n`` keys (unbounded if ``n`` is falsy)."""
        self.dq_store = deque()  # keys, most recently used at the left
        self.key_reference = set()  # fast membership mirror of dq_store
        if not n:
            self._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            self._MAX_CAPACITY = n

    def refer(self, x) -> None:
        """Record a use of key ``x``, evicting the LRU key when the cache is full."""
        if x not in self.key_reference:
            if len(self.dq_store) == self._MAX_CAPACITY:
                evicted = self.dq_store.pop()  # least recently used (rightmost)
                self.key_reference.remove(evicted)
        else:
            # Existing key: move it to the most-recent position.
            self.dq_store.remove(x)
        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        """Print keys from most to least recently used."""
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): the class above is named `snake_case_` and the instance is
    # bound to `UpperCamelCase`, so `LRUCache(4)` and every `lru_cache.*` call
    # below raise NameError as written — confirm the intended names.
    UpperCamelCase = LRUCache(4)
    lru_cache.refer('''A''')
    lru_cache.refer(2)
    lru_cache.refer(3)
    lru_cache.refer('''A''')
    lru_cache.refer(4)
    lru_cache.refer(5)
    lru_cache.display()
    print(lru_cache)
    assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 87 | 1 |
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def lowercase_ ( _lowerCamelCase : int):
    """Fixture: reset datasets' emitted deprecation-warning registry between tests."""
    # NOTE(review): the parameter is named `_lowerCamelCase` but the body uses
    # `monkeypatch` — presumably the parameter should be `monkeypatch`.
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings" , set())


@pytest.fixture
def lowercase_ ( _lowerCamelCase : Union[str, Any]):
    """Fixture: stub `datasets.inspect.huggingface_hub` with a mock exposing a fixed metric list."""
    class snake_case_ :
        # Mock of a single hub metric entry (originally `MetricMock`).
        def __init__( self : int , lowercase_ : Dict ) -> Tuple:
            # NOTE(review): binds to a dead local instead of `self.id`, and
            # `metric_id` is only defined in the comprehension below — confirm
            # the intended wiring.
            lowercase__ : List[Any] = metric_id
    class snake_case_ :
        # Mock of the huggingface_hub module (originally `HfhMock`); the second
        # `snake_case_` shadows the first class.
        __A : Any = [MetricMock(__A ) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]
        def __UpperCamelCase ( self : Tuple ) -> Tuple:
            return self._metrics
    monkeypatch.setattr("datasets.inspect.huggingface_hub" , HfhMock())


@pytest.mark.parametrize(
    "func, args" , [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))])
def lowercase_ ( _lowerCamelCase : List[Any] , _lowerCamelCase : int , _lowerCamelCase : str , _lowerCamelCase : Tuple , _lowerCamelCase : Dict):
    """Each deprecated metric entry point should emit the evaluate-migration warning."""
    # NOTE(review): duplicate parameter names make this a SyntaxError, and the
    # body reads `args` / `tmp_path` / `func` — confirm the intended signature.
    if "tmp_path" in args:
        lowercase__ : Any = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
    with pytest.warns(_lowerCamelCase , match="https://huggingface.co/docs/evaluate"):
        func(*_lowerCamelCase)
| 87 | from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json''',
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class snake_case_ ( __A ):
    """Model configuration for ConvBERT (stores the architecture hyper-parameters).

    NOTE(review): the original `__init__` reused one obfuscated name for every
    parameter (a SyntaxError) and bound the attributes to dead locals instead
    of ``self``; parameter names and order are reconstructed from the
    assignment targets and the documented ConvBERT defaults.
    """

    # Presumably the Hugging Face `model_type` identifier — the attribute name
    # was obfuscated to `__A`; confirm before renaming.
    __A : List[str] = "convbert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        embedding_size=768,
        head_ratio=2,
        conv_kernel_size=9,
        num_groups=1,
        classifier_dropout=None,
        **kwargs,
    ) -> None:
        """Store the hyper-parameters; special token ids and extras go to the base class."""
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout
class snake_case_ ( __A ):
    """ONNX export configuration for ConvBERT: declares dynamic axes per input tensor."""

    @property
    def __UpperCamelCase ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
        """Return input-name -> {axis index: symbolic axis name} for ONNX export.

        Fixes the original, which bound the axis dict to a dead local
        (`lowercase__`) and then read the unbound name `dynamic_axis`.
        """
        # Multiple-choice inputs carry an extra `choice` axis between batch
        # and sequence; `self.task` is presumably set by the base OnnxConfig —
        # TODO confirm.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ] )
| 87 | 1 |
import os
from datetime import datetime as dt
from github import Github
UpperCamelCase = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''enhancement''',
'''new pipeline/model''',
'''new scheduler''',
'''wip''',
]
def lowercase_():
    """Run the stale-issue bot for huggingface/diffusers.

    Closes issues that stayed inactive after the stale notice, re-opens stale
    issues with fresh human activity, and posts the stale notice on old
    inactive issues. Fixes the obfuscated original, which bound results to
    dead locals and read the unbound names `comments` / `last_comment` / `i`.
    """
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        # Newest comment first.
        comments = sorted(issue.get_comments(), key=lambda c: c.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Closes the issue after 7 days of inactivity since the Stalebot notification.
            issue.edit(state="closed")
        elif (
            "stale" in issue.get_labels()
            and last_comment is not None
            and last_comment.user.login != "github-actions[bot]"
        ):
            # Opens the issue if someone other than Stalebot commented.
            issue.edit(state="open")
            issue.remove_from_labels("stale")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Post a Stalebot notification after 23 days of inactivity.
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored.")
            issue.add_to_labels("stale")


if __name__ == "__main__":
    # NOTE(review): originally called the undefined name `main()`.
    lowercase_()
| 87 | import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def lowercase_(tf_checkpoint_path: str, bert_config_file: str, pytorch_dump_path: str) -> None:
    """Convert a TensorFlow BERT checkpoint into a PyTorch `BertForPreTraining` state dict.

    NOTE(review): the original signature reused one obfuscated name for all
    three parameters (a SyntaxError); names are reconstructed from usage and
    the CLI wiring below.
    """
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f'''Building PyTorch model from configuration: {config}''')
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f'''Save PyTorch model to {pytorch_dump_path}''')
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    # NOTE(review): the parser was originally bound to `UpperCamelCase` while
    # the add_argument calls read `parser`; restored to one consistent name.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
    )
    parser.add_argument(
        '''--bert_config_file''',
        default=None,
        type=str,
        required=True,
        help=(
            '''The config json file corresponding to the pre-trained BERT model. \n'''
            '''This specifies the model architecture.'''
        ),
    )
    parser.add_argument(
        '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    args = parser.parse_args()
    lowercase_(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 87 | 1 |
def lowercase_(_lowerCamelCase: dict) -> set:
    """Approximate a minimum vertex cover of the adjacency-dict graph.

    Classic maximal-matching 2-approximation: repeatedly pick an arbitrary
    remaining edge, add both endpoints, drop all incident edges.
    """
    chosen_vertices = set()
    # edges = set of graph's edges
    edges = get_edges(_lowerCamelCase)
    # While there are still elements in edges list, take an arbitrary edge
    # (from_node, to_node) and add his extremity to chosen_vertices and then
    # remove all arcs adjacent to the from_node and to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                # Fixes the original, which discarded the unrelated argument
                # instead of the incident edge.
                edges.discard(edge)
    return chosen_vertices


def get_edges(graph: dict) -> set:
    """Return the set of directed edges (u, v) of the adjacency-dict graph.

    NOTE(review): originally obfuscated into a second `lowercase_` definition,
    leaving the `get_edges` call above unresolved; the name is restored here.
    """
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    # print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 87 | import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def lowercase_(key, default=False):
    """Read boolean flag ``key`` from the environment.

    Returns ``default`` when the variable is unset; otherwise `strtobool`'s
    0/1 result. Raises ValueError for values that are neither yes-ish nor
    no-ish. Fixes the obfuscated original, whose duplicated parameter names
    were a SyntaxError and which returned the unbound name ``_value``.
    """
    try:
        _value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(_value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f'''If set, {key} must be yes or no.''')
    return _value
UpperCamelCase = parse_flag_from_env('''RUN_SLOW''', default=False)
UpperCamelCase = parse_flag_from_env('''RUN_REMOTE''', default=False)
UpperCamelCase = parse_flag_from_env('''RUN_LOCAL''', default=True)
UpperCamelCase = parse_flag_from_env('''RUN_PACKAGED''', default=True)
# Compression
UpperCamelCase = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='''test requires lz4''')
UpperCamelCase = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='''test requires py7zr''')
UpperCamelCase = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='''test requires zstandard''')
# Audio
UpperCamelCase = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('''soundfile''') is None or version.parse(importlib_metadata.version('''soundfile''')) < version.parse('''0.12.0'''),
reason='''test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ''',
)
# Beam
UpperCamelCase = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('''0.3.2'''),
reason='''test requires apache-beam and a compatible dill version''',
)
# Dill-cloudpickle compatibility
UpperCamelCase = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('''0.3.2'''),
reason='''test requires dill>0.3.2 for cloudpickle compatibility''',
)
# Windows
UpperCamelCase = pytest.mark.skipif(
sys.platform == '''win32''',
reason='''test should not be run on Windows''',
)
def lowercase_ ( _lowerCamelCase : int):
try:
import faiss # noqa
except ImportError:
lowercase__ : Optional[Any] = unittest.skip("test requires faiss")(_lowerCamelCase)
return test_case
def lowercase_ ( _lowerCamelCase : int):
try:
import regex # noqa
except ImportError:
lowercase__ : List[Any] = unittest.skip("test requires regex")(_lowerCamelCase)
return test_case
def lowercase_ ( _lowerCamelCase : int):
try:
import elasticsearch # noqa
except ImportError:
lowercase__ : Optional[int] = unittest.skip("test requires elasticsearch")(_lowerCamelCase)
return test_case
def lowercase_ ( _lowerCamelCase : Union[str, Any]):
try:
import sqlalchemy # noqa
except ImportError:
lowercase__ : Optional[int] = unittest.skip("test requires sqlalchemy")(_lowerCamelCase)
return test_case
def lowercase_ ( _lowerCamelCase : int):
if not config.TORCH_AVAILABLE:
lowercase__ : Tuple = unittest.skip("test requires PyTorch")(_lowerCamelCase)
return test_case
def lowercase_ ( _lowerCamelCase : Tuple):
if not config.TF_AVAILABLE:
lowercase__ : Any = unittest.skip("test requires TensorFlow")(_lowerCamelCase)
return test_case
def lowercase_ ( _lowerCamelCase : Dict):
if not config.JAX_AVAILABLE:
lowercase__ : List[str] = unittest.skip("test requires JAX")(_lowerCamelCase)
return test_case
def lowercase_ ( _lowerCamelCase : int):
if not config.PIL_AVAILABLE:
lowercase__ : Dict = unittest.skip("test requires Pillow")(_lowerCamelCase)
return test_case
def lowercase_ ( _lowerCamelCase : Tuple):
try:
import transformers # noqa F401
except ImportError:
return unittest.skip("test requires transformers")(_lowerCamelCase)
else:
return test_case
def lowercase_ ( _lowerCamelCase : Optional[Any]):
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip("test requires tiktoken")(_lowerCamelCase)
else:
return test_case
def lowercase_ ( _lowerCamelCase : Dict):
try:
import spacy # noqa F401
except ImportError:
return unittest.skip("test requires spacy")(_lowerCamelCase)
else:
return test_case
def lowercase_ ( _lowerCamelCase : Optional[int]):
def _require_spacy_model(_lowerCamelCase : Optional[int]):
try:
import spacy # noqa F401
spacy.load(_lowerCamelCase)
except ImportError:
return unittest.skip("test requires spacy")(_lowerCamelCase)
except OSError:
return unittest.skip("test requires spacy model '{}'".format(_lowerCamelCase))(_lowerCamelCase)
else:
return test_case
return _require_spacy_model
def lowercase_ ( _lowerCamelCase : Dict):
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip("test requires pyspark")(_lowerCamelCase)
else:
return test_case
def lowercase_ ( _lowerCamelCase : List[str]):
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip("test requires joblibspark")(_lowerCamelCase)
else:
return test_case
def lowercase_ ( _lowerCamelCase : Dict):
if not _run_slow_tests or _run_slow_tests == 0:
lowercase__ : Tuple = unittest.skip("test is slow")(_lowerCamelCase)
return test_case
def lowercase_ ( _lowerCamelCase : int):
if not _run_local_tests or _run_local_tests == 0:
lowercase__ : str = unittest.skip("test is local")(_lowerCamelCase)
return test_case
def lowercase_ ( _lowerCamelCase : Optional[int]):
if not _run_packaged_tests or _run_packaged_tests == 0:
lowercase__ : List[Any] = unittest.skip("test is packaged")(_lowerCamelCase)
return test_case
def lowercase_ ( _lowerCamelCase : Tuple):
if not _run_remote_tests or _run_remote_tests == 0:
lowercase__ : Union[str, Any] = unittest.skip("test requires remote")(_lowerCamelCase)
return test_case
def lowercase_ ( *_lowerCamelCase : str):
def decorate(cls : str):
for name, fn in cls.__dict__.items():
if callable(_lowerCamelCase) and name.startswith("test"):
for decorator in decorators:
lowercase__ : Optional[int] = decorator(_lowerCamelCase)
setattr(cls , _lowerCamelCase , _lowerCamelCase)
return cls
return decorate
class snake_case_ ( __A ):
pass
class snake_case_ ( __A ):
__A : List[Any] = 0
__A : str = 1
__A : int = 2
@contextmanager
def lowercase_ ( _lowerCamelCase : List[str]=OfflineSimulationMode.CONNECTION_FAILS , _lowerCamelCase : int=1E-16):
lowercase__ : int = requests.Session().request
def timeout_request(_lowerCamelCase : str , _lowerCamelCase : Dict , _lowerCamelCase : Dict , **_lowerCamelCase : str):
# Change the url to an invalid url so that the connection hangs
lowercase__ : Any = "https://10.255.255.1"
if kwargs.get("timeout") is None:
raise RequestWouldHangIndefinitelyError(
f'''Tried a call to {url} in offline mode with no timeout set. Please set a timeout.''')
lowercase__ : Dict = timeout
try:
return online_request(_lowerCamelCase , _lowerCamelCase , **_lowerCamelCase)
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
lowercase__ : Dict = url
lowercase__ : Union[str, Any] = e.args[0]
lowercase__ : Optional[Any] = (max_retry_error.args[0].replace("10.255.255.1" , f'''OfflineMock[{url}]'''),)
lowercase__ : int = (max_retry_error,)
raise
def raise_connection_error(_lowerCamelCase : Union[str, Any] , _lowerCamelCase : Optional[Any] , **_lowerCamelCase : Tuple):
raise requests.ConnectionError("Offline mode is enabled." , request=_lowerCamelCase)
if mode is OfflineSimulationMode.CONNECTION_FAILS:
with patch("requests.Session.send" , _lowerCamelCase):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
# inspired from https://stackoverflow.com/a/904609
with patch("requests.Session.request" , _lowerCamelCase):
yield
elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
with patch("datasets.config.HF_DATASETS_OFFLINE" , _lowerCamelCase):
yield
else:
raise ValueError("Please use a value from the OfflineSimulationMode enum.")
@contextmanager
def lowercase_(*args, **kwargs):
    """Context manager: run the body inside a fresh temporary directory.

    ``args``/``kwargs`` are forwarded to ``tempfile.TemporaryDirectory``. The
    caller's working directory is always restored, even when the body raises.
    Fixes the obfuscated original, which reused one name for ``*args`` and
    ``**kwargs`` (a SyntaxError) and chdir'd to an unbound name.
    """
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            # Restore the previous working directory no matter what happened.
            os.chdir(original_working_dir)
@contextmanager
def lowercase_ ( ):
import gc
gc.collect()
lowercase__ : Union[str, Any] = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def lowercase_ ( ):
import gc
gc.collect()
lowercase__ : int = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def lowercase_ ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[Any]):
return deepcopy(_lowerCamelCase).integers(0 , 100 , 10).tolist() == deepcopy(_lowerCamelCase).integers(0 , 100 , 10).tolist()
def lowercase_ ( _lowerCamelCase : str):
import decorator
from requests.exceptions import HTTPError
def _wrapper(_lowerCamelCase : str , *_lowerCamelCase : Dict , **_lowerCamelCase : Dict):
try:
return func(*_lowerCamelCase , **_lowerCamelCase)
except HTTPError as err:
if str(_lowerCamelCase).startswith("500") or str(_lowerCamelCase).startswith("502"):
pytest.xfail(str(_lowerCamelCase))
raise err
return decorator.decorator(_wrapper , _lowerCamelCase)
class snake_case_ :
    """Result of a finished subprocess: return code plus captured stdout/stderr lines.

    Fixes the obfuscated original, whose `__init__` reused one parameter name
    three times (a SyntaxError) and bound the values to dead locals instead of
    the `returncode` / `stdout` / `stderr` attributes read by callers.
    """

    def __init__(self, returncode, stdout, stderr) -> None:
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def lowercase_ ( _lowerCamelCase : List[str] , _lowerCamelCase : Dict):
while True:
lowercase__ : Optional[int] = await stream.readline()
if line:
callback(_lowerCamelCase)
else:
break
async def lowercase_ ( _lowerCamelCase : Optional[int] , _lowerCamelCase : Union[str, Any]=None , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : int=None , _lowerCamelCase : Optional[Any]=False , _lowerCamelCase : Tuple=False):
if echo:
print("\nRunning: " , " ".join(_lowerCamelCase))
lowercase__ : Optional[int] = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=_lowerCamelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=_lowerCamelCase , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
lowercase__ : str = []
lowercase__ : List[str] = []
def tee(_lowerCamelCase : str , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Optional[int]=""):
lowercase__ : Optional[int] = line.decode("utf-8").rstrip()
sink.append(_lowerCamelCase)
if not quiet:
print(_lowerCamelCase , _lowerCamelCase , file=_lowerCamelCase)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout , lambda _lowerCamelCase: tee(_lowerCamelCase , _lowerCamelCase , sys.stdout , label="stdout:")),
_read_stream(p.stderr , lambda _lowerCamelCase: tee(_lowerCamelCase , _lowerCamelCase , sys.stderr , label="stderr:")),
] , timeout=_lowerCamelCase , )
return _RunOutput(await p.wait() , _lowerCamelCase , _lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Optional[int] , _lowerCamelCase : List[str]=None , _lowerCamelCase : Dict=None , _lowerCamelCase : int=180 , _lowerCamelCase : Union[str, Any]=False , _lowerCamelCase : Optional[Any]=True):
lowercase__ : Any = asyncio.get_event_loop()
lowercase__ : Tuple = loop.run_until_complete(
_stream_subprocess(_lowerCamelCase , env=_lowerCamelCase , stdin=_lowerCamelCase , timeout=_lowerCamelCase , quiet=_lowerCamelCase , echo=_lowerCamelCase))
lowercase__ : int = " ".join(_lowerCamelCase)
if result.returncode > 0:
lowercase__ : Any = "\n".join(result.stderr)
raise RuntimeError(
f'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n'''
f'''The combined stderr from workers follows:\n{stderr}''')
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f'''\'{cmd_str}\' produced no output.''')
return result
def lowercase_():
    """Return the numeric id of the current pytest-xdist worker (0 outside xdist).

    Fixes the obfuscated original, which bound the intermediate results to
    dead locals and returned the unbound name `_lowerCamelCase`.
    """
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    # Strip the "gw" prefix, leaving just the numeric id.
    worker = re.sub(R"^gw", "", worker, 0, re.M)
    return int(worker)
def lowercase_ ( ):
    """Pick a torch.distributed port unique per pytest-xdist worker (base 29500 + worker id)."""
    lowercase__ : Union[str, Any] = 2_9500
    # NOTE(review): `pytest_xdist_worker_id` is not defined under that name in
    # this module (the helper above is also `lowercase_`), and `port` /
    # `uniq_delta` are never bound — this raises NameError as written; confirm
    # the intended names.
    lowercase__ : Optional[int] = pytest_xdist_worker_id()
    return port + uniq_delta
| 87 | 1 |
from __future__ import annotations
class snake_case_ :
    """Singly linked list node; ``repr`` renders the chain as ``a->b->c``.

    Fixes the obfuscated original, whose `__init__` bound `data`/`next` to
    dead locals instead of `self`.
    """

    def __init__(self, data=None) -> None:
        self.data = data
        self.next = None

    def __repr__(self) -> str:
        parts = []
        node = self
        while node:
            parts.append(f"{node.data}")
            node = node.next
        return "->".join(parts)


def lowercase_(elements_list: list):
    """Build a linked list from a non-empty Python list and return its head.

    Raises Exception on an empty input. Fixes the original, which never linked
    the nodes (results went to dead locals) and returned/called the undefined
    names `head` / `Node`.
    """
    if not elements_list:
        raise Exception("The Elements List is empty")
    current = head = snake_case_(elements_list[0])
    for i in range(1, len(elements_list)):
        current.next = snake_case_(elements_list[i])
        current = current.next
    return head
def lowercase_(head_node) -> None:
    """Print each node's data from the tail back to the head.

    Accepts None or a `snake_case_` head; anything else is ignored. Fixes the
    obfuscated original, which called `isinstance(x, x)` (a TypeError for
    non-class values) and recursed through the undefined name `print_reverse`.
    An explicit stack replaces the recursion so the behaviour no longer
    depends on which of the module's many `lowercase_` bindings wins.
    """
    items = []
    node = head_node
    # Walk forward collecting data; stop at the end or at a non-node value.
    while node is not None and isinstance(node, snake_case_):
        items.append(node.data)
        node = node.next
    for data in reversed(items):
        print(data)
def lowercase_ ( ):
    """Demo: build a linked list, print it, then print its elements in reverse."""
    from doctest import testmod
    testmod()
    # NOTE(review): `make_linked_list` and `print_reverse` are not defined
    # under these names in this module (both helpers are named `lowercase_`),
    # and the list is bound to `lowercase__` while later calls read
    # `_lowerCamelCase` — this demo raises NameError as written.
    lowercase__ : List[str] = make_linked_list([14, 52, 14, 12, 43])
    print("Linked List:")
    print(_lowerCamelCase)
    print("Elements in Reverse:")
    print_reverse(_lowerCamelCase)
if __name__ == "__main__":
    # NOTE(review): `main` is undefined; the entry point above is `lowercase_`.
    main()
| 87 | import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def lowercase_(_lowerCamelCase: int):
    """Return (HF key, original key) rename pairs for the patch embedding of one CvT stage.

    Args:
        _lowerCamelCase: stage index.
    """
    # Fix: the original body used the undefined name `idx`; alias the parameter.
    idx = _lowerCamelCase
    embed = []
    # projection conv + its normalization, weight and bias each
    for hf_leaf, orig_leaf in (
        ("projection.weight", "proj.weight"),
        ("projection.bias", "proj.bias"),
        ("normalization.weight", "norm.weight"),
        ("normalization.bias", "norm.bias"),
    ):
        embed.append(
            (
                f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.{hf_leaf}",
                f"stage{idx}.patch_embed.{orig_leaf}",
            )
        )
    return embed
def lowercase_(idx, cnt):
    """Return (HF key, original key) rename pairs for one CvT attention block.

    Args:
        idx: stage index.
        cnt: block index within the stage.

    The original had two parameters both named `_lowerCamelCase` (a
    SyntaxError) while the body used `idx`/`cnt`; the parameters are renamed
    to match. Output order is identical to the original 34 appends.
    """
    attention_weights = []
    hf_prefix = f"cvt.encoder.stages.{idx}.layers.{cnt}"
    orig_prefix = f"stage{idx}.blocks.{cnt}"
    # Depthwise conv projections for query/key/value: conv weight + batchnorm stats.
    for long_name, short_name in (("query", "q"), ("key", "k"), ("value", "v")):
        for hf_leaf, orig_leaf in (
            ("convolution.weight", "conv.weight"),
            ("normalization.weight", "bn.weight"),
            ("normalization.bias", "bn.bias"),
            ("normalization.running_mean", "bn.running_mean"),
            ("normalization.running_var", "bn.running_var"),
            ("normalization.num_batches_tracked", "bn.num_batches_tracked"),
        ):
            attention_weights.append(
                (
                    f"{hf_prefix}.attention.attention.convolution_projection_{long_name}.convolution_projection.{hf_leaf}",
                    f"{orig_prefix}.attn.conv_proj_{short_name}.{orig_leaf}",
                )
            )
    # Linear q/k/v projections.
    for long_name, short_name in (("query", "q"), ("key", "k"), ("value", "v")):
        for leaf in ("weight", "bias"):
            attention_weights.append(
                (
                    f"{hf_prefix}.attention.attention.projection_{long_name}.{leaf}",
                    f"{orig_prefix}.attn.proj_{short_name}.{leaf}",
                )
            )
    # Attention output projection, MLP, and the two layernorms.
    for hf_leaf, orig_leaf in (
        ("attention.output.dense.weight", "attn.proj.weight"),
        ("attention.output.dense.bias", "attn.proj.bias"),
        ("intermediate.dense.weight", "mlp.fc1.weight"),
        ("intermediate.dense.bias", "mlp.fc1.bias"),
        ("output.dense.weight", "mlp.fc2.weight"),
        ("output.dense.bias", "mlp.fc2.bias"),
        ("layernorm_before.weight", "norm1.weight"),
        ("layernorm_before.bias", "norm1.bias"),
        ("layernorm_after.weight", "norm2.weight"),
        ("layernorm_after.bias", "norm2.bias"),
    ):
        attention_weights.append((f"{hf_prefix}.{hf_leaf}", f"{orig_prefix}.{orig_leaf}"))
    return attention_weights
def lowercase_(_lowerCamelCase):
    """Return the (HF key, original key) pair for one stage's cls token.

    The original checkpoint always stores the token under the hard-coded name
    ``stage2.cls_token`` (only the last stage carries a cls token).
    """
    # Fix: the original body used the undefined name `idx`; alias the parameter.
    idx = _lowerCamelCase
    token = []
    token.append((f"cvt.encoder.stages.{idx}.cls_token", "stage2.cls_token"))
    return token
def lowercase_():
    """Return rename pairs for the final layernorm and the classifier head."""
    # Fix: the original bound the list to a mismatched local name while
    # appending to the undefined `head`.
    head = []
    head.append(("layernorm.weight", "norm.weight"))
    head.append(("layernorm.bias", "norm.bias"))
    head.append(("classifier.weight", "head.weight"))
    head.append(("classifier.bias", "head.bias"))
    return head
def lowercase_(cvt_model, image_size, cvt_file_name, pytorch_dump_folder_path):
    """Convert an original CvT checkpoint into a HF CvtForImageClassification model.

    Args:
        cvt_model: model variant name (e.g. "cvt-13", "cvt-21", "cvt-w24").
        image_size: input image edge size for the image processor.
        cvt_file_name: path to the original .pth checkpoint.
        pytorch_dump_folder_path: output directory for model + processor.

    The original had four parameters all named `_lowerCamelCase` (a
    SyntaxError) and referenced many undefined locals; both are fixed.
    """
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1000
    repo_id = "huggingface/label-files"
    idalabel = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    # Fix: the dict comp cast the undefined `_lowerCamelCase` instead of each key.
    idalabel = {int(k): v for k, v in idalabel.items()}
    labelaid = {v: k for k, v in idalabel.items()}
    # NOTE(review): `id2label`/`label2id` are the PretrainedConfig kwarg names;
    # the mangled source had `idalabel=`/`labelaid=` — confirm against config API.
    config = CvtConfig(num_labels=num_labels, id2label=idalabel, label2id=labelaid)

    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]

    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    # NOTE(review): assumes the processor size dict uses "shortest_edge" — confirm.
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))

    huggingface_weights = OrderedDict()
    list_of_state_dict = []
    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)
    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    # Copy each original tensor under its new HF key.
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]

    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    # NOTE(review): the parser/args are bound to `UpperCamelCase` but used as
    # `parser`/`args` below — names look machine-renamed; confirm intended.
    UpperCamelCase = argparse.ArgumentParser()
    parser.add_argument(
        '''--cvt_model''',
        default='''cvt-w24''',
        type=str,
        help='''Name of the cvt model you\'d like to convert.''',
    )
    parser.add_argument(
        '''--image_size''',
        default=384,
        type=int,
        help='''Input Image Size''',
    )
    parser.add_argument(
        '''--cvt_file_name''',
        default=R'''cvtmodels\CvT-w24-384x384-IN-22k.pth''',
        type=str,
        help='''Input Image Size''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
    )
    UpperCamelCase = parser.parse_args()
    convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 87 | 1 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Lazy-import structure: submodule name -> public names it exposes.
# Fix: the original bound this dict to a mismatched name, clobbered it with
# the torch-only list instead of adding a key, imported `Blipa*` names that
# contradicted the declared `Blip2*` strings, and never installed the lazy
# module in sys.modules.
_import_structure = {
    "configuration_blip_2": [
        "BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Blip2Config",
        "Blip2QFormerConfig",
        "Blip2VisionConfig",
    ],
    "processing_blip_2": ["Blip2Processor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch models are only registered when torch is installed.
    _import_structure["modeling_blip_2"] = [
        "BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Blip2Model",
        "Blip2QFormerModel",
        "Blip2PreTrainedModel",
        "Blip2ForConditionalGeneration",
        "Blip2VisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )
else:
    import sys

    # Install the lazy module so attribute access triggers on-demand imports.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Lazy-import structure: submodule name -> public names it exposes.
# Fix: the original bound this dict to a mismatched name, overwrote it with
# each optional-dependency list instead of adding keys, and never installed
# the lazy module in sys.modules.
_import_structure = {
    "configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
    "tokenization_electra": ["ElectraTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_electra"] = [
        "ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ElectraForCausalLM",
        "ElectraForMaskedLM",
        "ElectraForMultipleChoice",
        "ElectraForPreTraining",
        "ElectraForQuestionAnswering",
        "ElectraForSequenceClassification",
        "ElectraForTokenClassification",
        "ElectraModel",
        "ElectraPreTrainedModel",
        "load_tf_weights_in_electra",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_electra"] = [
        "TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFElectraForMaskedLM",
        "TFElectraForMultipleChoice",
        "TFElectraForPreTraining",
        "TFElectraForQuestionAnswering",
        "TFElectraForSequenceClassification",
        "TFElectraForTokenClassification",
        "TFElectraModel",
        "TFElectraPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_electra"] = [
        "FlaxElectraForCausalLM",
        "FlaxElectraForMaskedLM",
        "FlaxElectraForMultipleChoice",
        "FlaxElectraForPreTraining",
        "FlaxElectraForQuestionAnswering",
        "FlaxElectraForSequenceClassification",
        "FlaxElectraForTokenClassification",
        "FlaxElectraModel",
        "FlaxElectraPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
    from .tokenization_electra import ElectraTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_electra_fast import ElectraTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_electra import (
            ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            ElectraForCausalLM,
            ElectraForMaskedLM,
            ElectraForMultipleChoice,
            ElectraForPreTraining,
            ElectraForQuestionAnswering,
            ElectraForSequenceClassification,
            ElectraForTokenClassification,
            ElectraModel,
            ElectraPreTrainedModel,
            load_tf_weights_in_electra,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_electra import (
            TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFElectraForMaskedLM,
            TFElectraForMultipleChoice,
            TFElectraForPreTraining,
            TFElectraForQuestionAnswering,
            TFElectraForSequenceClassification,
            TFElectraForTokenClassification,
            TFElectraModel,
            TFElectraPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_electra import (
            FlaxElectraForCausalLM,
            FlaxElectraForMaskedLM,
            FlaxElectraForMultipleChoice,
            FlaxElectraForPreTraining,
            FlaxElectraForQuestionAnswering,
            FlaxElectraForSequenceClassification,
            FlaxElectraForTokenClassification,
            FlaxElectraModel,
            FlaxElectraPreTrainedModel,
        )
else:
    import sys

    # Install the lazy module so attribute access triggers on-demand imports.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 87 | 1 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def lowercase_(config, base_model=False):
    """Build (original key, HF key) rename pairs for a ViT-MSN checkpoint.

    Args:
        config: model config providing `num_hidden_layers`.
        base_model: when True, emit keys for a bare ViT (no "vit." prefix,
            no classification head).

    The original had two parameters both named `_lowerCamelCase` (a
    SyntaxError) while the body used `config`/`base_model`; renamed to match.
    """
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"module.blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"module.blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"module.blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"module.blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"module.blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"module.blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))
    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("module.cls_token", "vit.embeddings.cls_token"),
            ("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("module.pos_embed", "vit.embeddings.position_embeddings"),
        ])
    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("module.norm.weight", "layernorm.weight"),
                ("module.norm.bias", "layernorm.bias"),
            ])
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ])
    return rename_keys
def lowercase_(state_dict, config, base_model=False):
    """Split each fused qkv projection in `state_dict` into separate HF q/k/v entries.

    Args:
        state_dict: checkpoint dict, mutated in place.
        config: provides `num_hidden_layers` and `hidden_size`.
        base_model: when True, HF keys carry no "vit." prefix.

    The original had three parameters all named `_lowerCamelCase` (a
    SyntaxError) and never wrote the sliced tensors back; fixed to the HF
    target keys the slices were computed for.
    """
    for i in range(config.num_hidden_layers):
        prefix = "" if base_model else "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias")
        hs = config.hidden_size
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[:hs, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[:hs]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[hs : hs * 2, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[hs : hs * 2]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-hs:, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-hs:]
def lowercase_(_lowerCamelCase):
    """Drop the classification-head entries from a raw state dict (in place)."""
    state_dict = _lowerCamelCase
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        # Fix: the original popped the dict itself as a key; pop each ignore
        # key, with a default so missing keys are tolerated.
        state_dict.pop(k, None)
def lowercase_(_lowerCamelCase):
    """Drop the MSN self-supervised projection head from a state dict (in place).

    The projection head is used only during pre-training and is not needed
    for downstream tasks.
    """
    state_dict = _lowerCamelCase
    ignore_keys = [
        "module.fc.fc1.weight",
        "module.fc.fc1.bias",
        "module.fc.bn1.weight",
        "module.fc.bn1.bias",
        "module.fc.bn1.running_mean",
        "module.fc.bn1.running_var",
        "module.fc.bn1.num_batches_tracked",
        "module.fc.fc2.weight",
        "module.fc.fc2.bias",
        "module.fc.bn2.weight",
        "module.fc.bn2.bias",
        "module.fc.bn2.running_mean",
        "module.fc.bn2.running_var",
        "module.fc.bn2.num_batches_tracked",
        "module.fc.fc3.weight",
        "module.fc.fc3.bias",
    ]
    for k in ignore_keys:
        # Fix: the original popped the dict itself as a key; pop each ignore
        # key with a default so missing keys are tolerated.
        state_dict.pop(k, None)
def lowercase_(dct, old, new):
    """Move `dct[old]` to `dct[new]` (in place).

    The original had three parameters all named `_lowerCamelCase` (a
    SyntaxError); renamed to the roles the body uses.
    """
    val = dct.pop(old)
    dct[new] = val
def lowercase_(checkpoint_url, pytorch_dump_folder_path):
    """Convert an original ViT-MSN checkpoint (by URL) into a HF ViTMSNModel.

    Args:
        checkpoint_url: URL of the original .pth.tar checkpoint.
        pytorch_dump_folder_path: output directory for model + processor.

    The original had two parameters both named `_lowerCamelCase` (a
    SyntaxError) and referenced many undefined locals; both are fixed.
    """
    config = ViTMSNConfig()
    config.num_labels = 1000

    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    idalabel = json.load(open(hf_hub_download(repo_id, filename), "r"))
    # Fix: the dict comp cast the undefined `_lowerCamelCase` instead of each key.
    idalabel = {int(k): v for k, v in idalabel.items()}
    config.id2label = idalabel
    config.label2id = {v: k for k, v in idalabel.items()}

    # Per-variant architecture hyperparameters, keyed off the checkpoint URL.
    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1

    model = ViTMSNModel(config)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]

    image_processor = ViTImageProcessor(size=config.image_size)
    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)

    model.load_state_dict(state_dict)
    model.eval()

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD)
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state

    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]])
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]])

    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # NOTE(review): the parser/args are bound to `UpperCamelCase` but used as
    # `parser`/`args` below — names look machine-renamed; confirm intended.
    UpperCamelCase = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--checkpoint_url''',
        default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''',
        type=str,
        help='''URL of the checkpoint you\'d like to convert.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
    )
    UpperCamelCase = parser.parse_args()
    convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 87 | import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case_ ( __A ,unittest.TestCase ):
    """LED tokenizer test suite (slow and fast variants).

    NOTE(review): identifiers in this file appear machine-renamed — e.g. the
    three `__A` class attributes presumably correspond to `tokenizer_class`,
    `rust_tokenizer_class`, and `test_rust_tokenizer`, and several method
    bodies reference `lowercase_` locals that are never bound here. Confirm
    against the upstream test module before relying on this copy.
    """

    __A : Union[str, Any] = LEDTokenizer
    __A : Union[str, Any] = LEDTokenizerFast
    __A : Optional[Any] = True

    def __UpperCamelCase ( self : List[str] ) -> Union[str, Any]:
        """Write a tiny BPE vocab/merges pair to a temp dir for the tests below."""
        super().setUp()
        lowercase__ : List[str] = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        # NOTE(review): `lowercase_` below is unbound in this copy; presumably
        # it should be the vocab list built above.
        lowercase__ : Optional[int] = dict(zip(lowercase_ , range(len(lowercase_ ) ) ) )
        lowercase__ : str = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        lowercase__ : Tuple = {"unk_token": "<unk>"}
        lowercase__ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        lowercase__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(lowercase_ ) + "\n" )
        with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(lowercase_ ) )

    def __UpperCamelCase ( self : int , **lowercase_ : str ) -> List[Any]:
        """Instantiate the slow tokenizer from the temp vocab."""
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase_ )

    def __UpperCamelCase ( self : List[Any] , **lowercase_ : Any ) -> List[Any]:
        """Instantiate the fast (Rust) tokenizer from the temp vocab."""
        kwargs.update(self.special_tokens_map )
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowercase_ )

    def __UpperCamelCase ( self : str , lowercase_ : Any ) -> Tuple:
        return "lower newer", "lower newer"

    @cached_property
    def __UpperCamelCase ( self : Tuple ) -> Optional[Any]:
        # Pretrained slow tokenizer used by the batch/targets tests.
        return LEDTokenizer.from_pretrained("allenai/led-base-16384" )

    @cached_property
    def __UpperCamelCase ( self : Tuple ) -> int:
        # Pretrained fast tokenizer used by the batch/targets tests.
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384" )

    @require_torch
    def __UpperCamelCase ( self : int ) -> List[Any]:
        """Batch-encode two sentences and check ids/shapes against expected tokens."""
        lowercase__ : Union[str, Any] = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        lowercase__ : str = [0, 2_50, 2_51, 1_78_18, 13, 3_91_86, 19_38, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            lowercase__ : Dict = tokenizer(lowercase_ , max_length=len(lowercase_ ) , padding=lowercase_ , return_tensors="pt" )
            self.assertIsInstance(lowercase_ , lowercase_ )
            self.assertEqual((2, 9) , batch.input_ids.shape )
            self.assertEqual((2, 9) , batch.attention_mask.shape )
            lowercase__ : Union[str, Any] = batch.input_ids.tolist()[0]
            self.assertListEqual(lowercase_ , lowercase_ )

    @require_torch
    def __UpperCamelCase ( self : List[str] ) -> Tuple:
        """Plain encoding should not emit labels or decoder attention masks."""
        lowercase__ : Dict = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            lowercase__ : Optional[int] = tokenizer(lowercase_ , padding=lowercase_ , return_tensors="pt" )
            self.assertIn("input_ids" , lowercase_ )
            self.assertIn("attention_mask" , lowercase_ )
            self.assertNotIn("labels" , lowercase_ )
            self.assertNotIn("decoder_attention_mask" , lowercase_ )

    @require_torch
    def __UpperCamelCase ( self : Optional[Any] ) -> Any:
        """Targets padded to max_length must come out with that exact width."""
        lowercase__ : Dict = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            lowercase__ : Dict = tokenizer(text_target=lowercase_ , max_length=32 , padding="max_length" , return_tensors="pt" )
            self.assertEqual(32 , targets["input_ids"].shape[1] )

    @require_torch
    def __UpperCamelCase ( self : Optional[int] ) -> Tuple:
        """Overlong input must be truncated to the model's 5122-token window."""
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            lowercase__ : int = tokenizer(
                ["I am a small frog" * 10_24, "I am a small frog"] , padding=lowercase_ , truncation=lowercase_ , return_tensors="pt" )
            self.assertIsInstance(lowercase_ , lowercase_ )
            self.assertEqual(batch.input_ids.shape , (2, 51_22) )

    @require_torch
    def __UpperCamelCase ( self : List[str] ) -> Any:
        """Both inputs and targets must be wrapped in BOS/EOS special tokens."""
        lowercase__ : Union[str, Any] = ["A long paragraph for summarization."]
        lowercase__ : List[Any] = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            lowercase__ : List[Any] = tokenizer(lowercase_ , return_tensors="pt" )
            lowercase__ : Dict = tokenizer(text_target=lowercase_ , return_tensors="pt" )
            lowercase__ : Optional[int] = inputs["input_ids"]
            lowercase__ : str = targets["input_ids"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )

    @require_torch
    def __UpperCamelCase ( self : Union[str, Any] ) -> Dict:
        """`pad` must carry the global_attention_mask through padding."""
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            lowercase__ : int = ["Summary of the text.", "Another summary."]
            lowercase__ : List[str] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
            lowercase__ : Tuple = tokenizer(lowercase_ , padding=lowercase_ )
            lowercase__ : int = [[0] * len(lowercase_ ) for x in encoded_output["input_ids"]]
            lowercase__ : Any = tokenizer.pad(lowercase_ )
            self.assertSequenceEqual(outputs["global_attention_mask"] , lowercase_ )

    def __UpperCamelCase ( self : int ) -> Union[str, Any]:
        pass

    def __UpperCamelCase ( self : int ) -> Optional[Any]:
        """Slow and fast tokenizers must agree on mask-token handling."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                lowercase__ : List[Any] = self.rust_tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
                lowercase__ : List[str] = self.tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
                lowercase__ : List[Any] = "A, <mask> AllenNLP sentence."
                lowercase__ : Tuple = tokenizer_r.encode_plus(lowercase_ , add_special_tokens=lowercase_ , return_token_type_ids=lowercase_ )
                lowercase__ : List[str] = tokenizer_p.encode_plus(lowercase_ , add_special_tokens=lowercase_ , return_token_type_ids=lowercase_ )
                self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
                self.assertEqual(
                    sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
                lowercase__ : Any = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
                lowercase__ : Optional[Any] = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
                self.assertSequenceEqual(tokens_p["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
                self.assertSequenceEqual(tokens_r["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
                self.assertSequenceEqual(
                    lowercase_ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
                self.assertSequenceEqual(
                    lowercase_ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
| 87 | 1 |
import sys

from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core


# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
# Fix: the list was bound to a mismatched name while the loop below iterated
# the unbound `pkgs_to_check_at_runtime`.
pkgs_to_check_at_runtime = "python tqdm regex requests packaging filelock numpy tokenizers".split()

if sys.version_info < (3, 7):
    pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
    pkgs_to_check_at_runtime.append("importlib_metadata")

for pkg in pkgs_to_check_at_runtime:
    if pkg in deps:
        if pkg == "tokenizers":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_tokenizers_available

            if not is_tokenizers_available():
                continue  # not required, check version only if installed
        require_version_core(deps[pkg])
    else:
        raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
def lowercase_(pkg, hint=None):
    """Check that the installed version of *pkg* satisfies the pin in `deps`.

    Raises (via `require_version`) when the installed version violates the pin;
    `hint` is an optional extra message appended to the error.

    Bug fix: the original declared both parameters with the identical name
    `_lowerCamelCase` (a SyntaxError in Python) and then read the module-level
    loop variable `pkg` instead of its own argument.
    """
    require_version(deps[pkg], hint)
| 87 | import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
UpperCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCamelCase = 256
class snake_case_ ( __A ):
    """Spectrogram-diffusion pipeline: encodes note tokens plus a context
    spectrogram chunk, denoises a new mel chunk with the DDPM scheduler, and
    (optionally) vocodes the full spectrogram to audio with MelGAN.

    NOTE(review): this is an obfuscated copy — several method signatures below
    declare multiple parameters with the identical name `lowercase_` (a
    SyntaxError in Python), and bodies read the original variable names
    (`features`, `timesteps`, `mel`, ...) that the renamed assignments no
    longer define. The original identifiers need restoring before this runs.
    """

    # melgan is registered but optional — presumably the pipeline can still be
    # loaded without it ("mel" output path below); verify against loader.
    __A : str = ["melgan"]

    def __init__( self : str , lowercase_ : SpectrogramNotesEncoder , lowercase_ : SpectrogramContEncoder , lowercase_ : TaFilmDecoder , lowercase_ : DDPMScheduler , lowercase_ : OnnxRuntimeModel if is_onnx_available() else Any , ) -> None:
        super().__init__()

        # From MELGAN
        lowercase__ : List[Any] = math.log(1E-5 )  # Matches MelGAN training.
        lowercase__ : str = 4.0  # Largest value for most examples
        lowercase__ : Any = 1_28  # number of mel feature dims per frame

        self.register_modules(
            notes_encoder=lowercase_ , continuous_encoder=lowercase_ , decoder=lowercase_ , scheduler=lowercase_ , melgan=lowercase_ , )

    def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : List[str]=(-1.0, 1.0) , lowercase_ : Dict=False ) -> Optional[Any]:
        # Linearly map features from [self.min_value, self.max_value] into
        # `output_range`, optionally clipping into range first.
        lowercase__ , lowercase__ : int = output_range
        if clip:
            lowercase__ : Optional[Any] = torch.clip(lowercase_ , self.min_value , self.max_value )
        # Scale to [0, 1].
        lowercase__ : List[str] = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out

    def __UpperCamelCase ( self : Optional[int] , lowercase_ : List[str] , lowercase_ : List[str]=(-1.0, 1.0) , lowercase_ : List[Any]=False ) -> Union[str, Any]:
        # Inverse of the scaler above: map model outputs from `input_range`
        # back into [self.min_value, self.max_value].
        lowercase__ , lowercase__ : Tuple = input_range
        lowercase__ : Optional[Any] = torch.clip(lowercase_ , lowercase_ , lowercase_ ) if clip else outputs
        # Scale to [0, 1].
        lowercase__ : Union[str, Any] = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value

    def __UpperCamelCase ( self : List[str] , lowercase_ : Any , lowercase_ : Optional[Any] , lowercase_ : Tuple ) -> List[str]:
        # Encode note tokens and the continuous spectrogram context, returning
        # [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)].
        lowercase__ : Optional[Any] = input_tokens > 0  # non-padding token mask
        lowercase__ , lowercase__ : int = self.notes_encoder(
            encoder_input_tokens=lowercase_ , encoder_inputs_mask=lowercase_ )

        lowercase__ , lowercase__ : List[Any] = self.continuous_encoder(
            encoder_inputs=lowercase_ , encoder_inputs_mask=lowercase_ )

        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]

    def __UpperCamelCase ( self : Optional[Any] , lowercase_ : Optional[int] , lowercase_ : Optional[int] , lowercase_ : str ) -> Tuple:
        # One decoder (denoiser) forward pass at diffusion time `noise_time`.
        lowercase__ : Union[str, Any] = noise_time
        # Normalise the timestep to a 1-D tensor on the right device.
        if not torch.is_tensor(lowercase_ ):
            lowercase__ : Optional[Any] = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
        elif torch.is_tensor(lowercase_ ) and len(timesteps.shape ) == 0:
            lowercase__ : Optional[Any] = timesteps[None].to(input_tokens.device )

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        lowercase__ : int = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )

        lowercase__ : str = self.decoder(
            encodings_and_masks=lowercase_ , decoder_input_tokens=lowercase_ , decoder_noise_time=lowercase_ )
        return logits

    @torch.no_grad()
    def __call__( self : List[str] , lowercase_ : List[List[int]] , lowercase_ : Optional[torch.Generator] = None , lowercase_ : int = 1_00 , lowercase_ : bool = True , lowercase_ : str = "numpy" , lowercase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowercase_ : int = 1 , ) -> Union[AudioPipelineOutput, Tuple]:
        """Generate a full mel spectrogram chunk-by-chunk: each predicted chunk
        is fed back as context for the next one; the concatenated result is
        optionally vocoded with MelGAN ("numpy" output) or returned as mel."""
        # Validate callback_steps is a positive integer when provided.
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(lowercase_ , lowercase_ ) or callback_steps <= 0)
        ):
            raise ValueError(
                F'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
                F''' {type(lowercase_ )}.''' )

        # Empty context chunk, growing full-song buffer, and an all-ones mask.
        lowercase__ : str = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa )
        lowercase__ : Optional[int] = np.zeros([1, 0, self.n_dims] , np.floataa )
        lowercase__ : str = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=lowercase_ , device=self.device )

        for i, encoder_input_tokens in enumerate(lowercase_ ):
            if i == 0:
                lowercase__ : Union[str, Any] = torch.from_numpy(pred_mel[:1].copy() ).to(
                    device=self.device , dtype=self.decoder.dtype )
                # The first chunk has no previous context.
                lowercase__ : List[str] = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=lowercase_ , device=self.device )
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                lowercase__ : str = ones

            lowercase__ : str = self.scale_features(
                lowercase_ , output_range=[-1.0, 1.0] , clip=lowercase_ )
            lowercase__ : str = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=lowercase_ , continuous_mask=lowercase_ , )

            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            lowercase__ : List[str] = randn_tensor(
                shape=encoder_continuous_inputs.shape , generator=lowercase_ , device=self.device , dtype=self.decoder.dtype , )

            # set step values
            self.scheduler.set_timesteps(lowercase_ )

            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
                lowercase__ : Optional[int] = self.decode(
                    encodings_and_masks=lowercase_ , input_tokens=lowercase_ , noise_time=t / self.scheduler.config.num_train_timesteps , )

                # Compute previous output: x_t -> x_t-1
                lowercase__ : Optional[Any] = self.scheduler.step(lowercase_ , lowercase_ , lowercase_ , generator=lowercase_ ).prev_sample

            # Un-scale the denoised chunk and append it to the full-song buffer.
            lowercase__ : Tuple = self.scale_to_features(lowercase_ , input_range=[-1.0, 1.0] )
            lowercase__ : List[str] = mel[:1]
            lowercase__ : Optional[int] = mel.cpu().float().numpy()

            lowercase__ : str = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(lowercase_ , lowercase_ )

            logger.info("Generated segment" , lowercase_ )

        # "numpy" output requires both ONNX runtime and the MelGAN component.
        if output_type == "numpy" and not is_onnx_available():
            raise ValueError(
                "Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'." )
        elif output_type == "numpy" and self.melgan is None:
            raise ValueError(
                "Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'." )

        if output_type == "numpy":
            # Vocode the full spectrogram to a waveform.
            lowercase__ : Union[str, Any] = self.melgan(input_features=full_pred_mel.astype(np.floataa ) )
        else:
            lowercase__ : Dict = full_pred_mel

        if not return_dict:
            return (output,)

        return AudioPipelineOutput(audios=lowercase_ )
| 87 | 1 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
UpperCamelCase = ''''''
UpperCamelCase = ''''''
UpperCamelCase = ''''''
UpperCamelCase = 1 # (0 is vertical, 1 is horizontal)
def lowercase_ ( ):
    """Driver: load the labelled dataset, flip every image and its YOLO boxes,
    and write the augmented images plus label files to the output directory.

    NOTE(review): this obfuscated copy references several names that do not
    exist at module level (`_lowerCamelCase`, `get_dataset`,
    `update_image_and_anno`, `random_chars`, `paths`, `new_annos`,
    `OUTPUT_DIR`, `file_name`, `file_root`, `letter_code`, `annos_list`) — the
    directory constants were all collapsed into `UpperCamelCase` above, so the
    original identifiers must be restored before this can run.
    """
    lowercase__ , lowercase__ : Optional[Any] = get_dataset(_lowerCamelCase , _lowerCamelCase)
    print("Processing...")
    lowercase__ , lowercase__ , lowercase__ : List[str] = update_image_and_anno(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase)

    for index, image in enumerate(_lowerCamelCase):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        lowercase__ : Optional[Any] = random_chars(32)
        # stem of the source image file (no directory, no extension)
        lowercase__ : Dict = paths[index].split(os.sep)[-1].rsplit("." , 1)[0]
        lowercase__ : Optional[Any] = f'''{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}'''
        cva.imwrite(f'''/{file_root}.jpg''' , _lowerCamelCase , [cva.IMWRITE_JPEG_QUALITY, 85])
        print(f'''Success {index+1}/{len(_lowerCamelCase)} with {file_name}''')
        # one "class x_center y_center width height" line per flipped box
        lowercase__ : Any = []
        for anno in new_annos[index]:
            lowercase__ : Any = f'''{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}'''
            annos_list.append(_lowerCamelCase)
        with open(f'''/{file_root}.txt''' , "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))
def lowercase_(label_dir: str, img_dir: str):
    """Collect (image path, parsed YOLO boxes) pairs from a labelled dataset.

    Reads every ``*.txt`` label file in `label_dir`; each line has the form
    "class x_center y_center width height". The matching image is assumed to
    live in `img_dir` as ``<label stem>.jpg``. Label files without any boxes
    are skipped entirely.

    Returns:
        (img_paths, labels): parallel lists — image paths and per-image lists
        of ``[cls, x, y, w, h]`` boxes.

    Bug fix: the original declared both parameters with the identical name
    `_lowerCamelCase` (a SyntaxError), so the label and image directories
    could not be distinguished.
    """
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        # label stem without directory or extension
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue  # skip empty label files
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def lowercase_(img_list: list, anno_list: list, flip_type: int = 1):
    """Flip every image and its YOLO annotations.

    Args:
        img_list: image file paths.
        anno_list: per-image lists of boxes ``[cls, x_center, y_center, w, h]``
            with coordinates normalised to [0, 1].
        flip_type: 1 = horizontal mirror (x axis), 0 = vertical mirror (y axis)
            — matches OpenCV's ``flip`` flipCode convention.

    Returns:
        (new_imgs_list, new_annos_lists, path_list): flipped images, their
        updated annotations, and the source paths, all parallel.

    Bug fix: the original declared its three parameters with one shared name
    `_lowerCamelCase` (a SyntaxError) and passed garbage arguments to
    `cva.flip`; the body's branching already assumed the names used here.
    """
    new_annos_lists = []
    path_list = []
    new_imgs_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cva.imread(path)
        if flip_type == 1:
            new_img = cva.flip(img, flip_type)
            for bbox in img_annos:
                # horizontal mirror moves x_center to 1 - x_center
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cva.flip(img, flip_type)
            for bbox in img_annos:
                # vertical mirror moves y_center to 1 - y_center
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list
def lowercase_(number_char: int = 32) -> str:
    """Return a random string of `number_char` lowercase letters and digits.

    Bug fix: the body asserted on and consumed `number_char` while the
    parameter was named `_lowerCamelCase`, so every call raised NameError.
    """
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
    # NOTE(review): no `main` is defined in this module — the driver above was
    # renamed to `lowercase_` (and later redefinitions shadow it), so running
    # this file directly raises NameError as written.
    main()
    print('''DONE ✅''')
| 87 | import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class snake_case_ ( unittest.TestCase ):
    """Pipeline tests for zero-shot audio classification (CLAP-style models).

    NOTE(review): all four test methods share the obfuscated name
    `__UpperCamelCase`, so later definitions shadow earlier ones — the
    original distinct test names need restoring for every test to run.
    """

    @require_torch
    def __UpperCamelCase ( self : Optional[int] ) -> List[Any]:
        # Tiny-model smoke test: untrained weights yield near-uniform scores.
        lowercase__ : Union[str, Any] = pipeline(
            task="zero-shot-audio-classification" , model="hf-internal-testing/tiny-clap-htsat-unfused" )
        lowercase__ : List[str] = load_dataset("ashraq/esc50" )
        # last ESC-50 training clip, raw waveform as an array
        lowercase__ : List[Any] = dataset["train"]["audio"][-1]["array"]
        lowercase__ : Dict = audio_classifier(lowercase_ , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] )
        self.assertEqual(
            nested_simplify(lowercase_ ) , [{"score": 0.5_01, "label": "Sound of a dog"}, {"score": 0.4_99, "label": "Sound of vaccum cleaner"}] , )

    @unittest.skip("No models are available in TF" )
    def __UpperCamelCase ( self : str ) -> Optional[int]:
        pass

    @slow
    @require_torch
    def __UpperCamelCase ( self : List[str] ) -> int:
        # Full-size model: single input, list input, and explicit batch_size
        # must all produce the same confident prediction.
        lowercase__ : Tuple = pipeline(
            task="zero-shot-audio-classification" , model="laion/clap-htsat-unfused" , )
        # This is an audio of a dog
        lowercase__ : Union[str, Any] = load_dataset("ashraq/esc50" )
        lowercase__ : Tuple = dataset["train"]["audio"][-1]["array"]
        lowercase__ : List[Any] = audio_classifier(lowercase_ , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] )
        self.assertEqual(
            nested_simplify(lowercase_ ) , [
                {"score": 0.9_99, "label": "Sound of a dog"},
                {"score": 0.0_01, "label": "Sound of vaccum cleaner"},
            ] , )
        lowercase__ : int = audio_classifier([audio] * 5 , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] )
        self.assertEqual(
            nested_simplify(lowercase_ ) , [
                [
                    {"score": 0.9_99, "label": "Sound of a dog"},
                    {"score": 0.0_01, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5 , )
        lowercase__ : Tuple = audio_classifier(
            [audio] * 5 , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] , batch_size=5 )
        self.assertEqual(
            nested_simplify(lowercase_ ) , [
                [
                    {"score": 0.9_99, "label": "Sound of a dog"},
                    {"score": 0.0_01, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5 , )

    @unittest.skip("No models are available in TF" )
    def __UpperCamelCase ( self : Union[str, Any] ) -> Dict:
        pass
| 87 | 1 |
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class snake_case_ ( unittest.TestCase ):
    """Slow integration tests for the Flax Stable Diffusion 2 pipeline,
    sharding generation across all local JAX devices."""

    def __UpperCamelCase ( self : List[Any] ) -> Tuple:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def __UpperCamelCase ( self : Tuple ) -> Dict:
        # SD2 with its default scheduler: compare a 3x3 corner slice of the
        # generated image against reference values.
        lowercase__ , lowercase__ : int = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2" , revision="bf16" , dtype=jnp.bfloataa , )
        lowercase__ : Union[str, Any] = "A painting of a squirrel eating a burger"
        lowercase__ : Any = jax.device_count()
        lowercase__ : str = num_samples * [prompt]
        lowercase__ : int = sd_pipe.prepare_inputs(lowercase_ )

        # replicate params and shard inputs across local devices
        lowercase__ : Optional[int] = replicate(lowercase_ )
        lowercase__ : Tuple = shard(lowercase_ )

        # one independent PRNG key per device for reproducible sampling
        lowercase__ : Tuple = jax.random.PRNGKey(0 )
        lowercase__ : str = jax.random.split(lowercase_ , jax.device_count() )

        lowercase__ : int = sd_pipe(lowercase_ , lowercase_ , lowercase_ , num_inference_steps=25 , jit=lowercase_ )[0]
        assert images.shape == (jax.device_count(), 1, 7_68, 7_68, 3)

        # flatten the device axis, then take a fixed slice of the last channel
        lowercase__ : Dict = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        lowercase__ : List[Any] = images[0, 2_53:2_56, 2_53:2_56, -1]

        lowercase__ : Optional[Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        lowercase__ : Dict = jnp.array([0.42_38, 0.44_14, 0.43_95, 0.44_53, 0.46_29, 0.45_90, 0.45_31, 0.4_55_08, 0.45_12] )
        print(F'''output_slice: {output_slice}''' )
        assert jnp.abs(output_slice - expected_slice ).max() < 1E-2

    def __UpperCamelCase ( self : int ) -> str:
        # Same check with the DPM-Solver multistep scheduler swapped in.
        lowercase__ : List[str] = "stabilityai/stable-diffusion-2"
        lowercase__ , lowercase__ : Optional[Any] = FlaxDPMSolverMultistepScheduler.from_pretrained(lowercase_ , subfolder="scheduler" )
        lowercase__ , lowercase__ : int = FlaxStableDiffusionPipeline.from_pretrained(
            lowercase_ , scheduler=lowercase_ , revision="bf16" , dtype=jnp.bfloataa , )
        lowercase__ : Tuple = scheduler_params
        lowercase__ : int = "A painting of a squirrel eating a burger"
        lowercase__ : Optional[int] = jax.device_count()
        lowercase__ : Union[str, Any] = num_samples * [prompt]
        lowercase__ : Any = sd_pipe.prepare_inputs(lowercase_ )

        lowercase__ : Union[str, Any] = replicate(lowercase_ )
        lowercase__ : Any = shard(lowercase_ )

        lowercase__ : Optional[int] = jax.random.PRNGKey(0 )
        lowercase__ : Tuple = jax.random.split(lowercase_ , jax.device_count() )

        lowercase__ : Tuple = sd_pipe(lowercase_ , lowercase_ , lowercase_ , num_inference_steps=25 , jit=lowercase_ )[0]
        assert images.shape == (jax.device_count(), 1, 7_68, 7_68, 3)

        lowercase__ : Any = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        lowercase__ : Union[str, Any] = images[0, 2_53:2_56, 2_53:2_56, -1]

        lowercase__ : int = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        lowercase__ : List[str] = jnp.array([0.43_36, 0.4_29_69, 0.44_53, 0.41_99, 0.42_97, 0.45_31, 0.44_34, 0.44_34, 0.42_97] )
        print(F'''output_slice: {output_slice}''' )
        assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 87 | import operator
def lowercase_ ( _lowerCamelCase : list , _lowerCamelCase : bool = False , _lowerCamelCase : list | None = None):
lowercase__ : int = operator.lt if reverse else operator.gt
lowercase__ : str = solution or []
if not arr:
return solution
lowercase__ : List[str] = [arr.pop(0)]
for i, item in enumerate(_lowerCamelCase):
if _operator(_lowerCamelCase , sublist[-1]):
sublist.append(_lowerCamelCase)
arr.pop(_lowerCamelCase)
# merging sublist into solution list
if not solution:
solution.extend(_lowerCamelCase)
else:
while sublist:
lowercase__ : str = sublist.pop(0)
for i, xx in enumerate(_lowerCamelCase):
if not _operator(_lowerCamelCase , _lowerCamelCase):
solution.insert(_lowerCamelCase , _lowerCamelCase)
break
else:
solution.append(_lowerCamelCase)
strand_sort(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
return solution
if __name__ == "__main__":
    # Self-checks for direct execution.
    # Bug fix: the sort above is defined as `lowercase_`, not `strand_sort`,
    # so these asserts raised NameError when the module was run directly.
    assert lowercase_([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
    assert lowercase_([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
| 87 | 1 |
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def lowercase_(_lowerCamelCase):
    """argparse factory hook: build the environment-info CLI command object.

    The argument (the parsed namespace) is accepted for the argparse `func`
    protocol but unused.

    Bug fix: the original returned `EnvironmentCommand()`, a name that does
    not exist in this module — the command class defined below is
    `snake_case_`.
    """
    return snake_case_()
class snake_case_ ( __A ):
    """`diffusers-cli env`: prints environment/version info for bug reports.

    NOTE(review): obfuscated copy — both static methods and the run method
    share the name `__UpperCamelCase` (later defs shadow earlier ones), and
    bodies read original variable names (`parser`, `download_parser`, `d`,
    `info`, ...) that the renamed assignments no longer define.
    """

    @staticmethod
    def __UpperCamelCase ( lowercase_ : ArgumentParser ) -> str:
        # Register the `env` sub-command and point its handler at the factory.
        lowercase__ : List[Any] = parser.add_parser("env" )
        download_parser.set_defaults(func=lowercase_ )

    def __UpperCamelCase ( self : Union[str, Any] ) -> Optional[int]:
        """Collect versions of the optional ML stack and print a fill-in report."""
        lowercase__ : Optional[int] = huggingface_hub.__version__
        # "not installed" / "NA" defaults are overwritten below when the
        # corresponding package is importable.
        lowercase__ : List[Any] = "not installed"
        lowercase__ : Optional[Any] = "NA"
        if is_torch_available():
            import torch

            lowercase__ : Optional[Any] = torch.__version__
            lowercase__ : Optional[Any] = torch.cuda.is_available()

        lowercase__ : Union[str, Any] = "not installed"
        if is_transformers_available():
            import transformers

            lowercase__ : Any = transformers.__version__

        lowercase__ : str = "not installed"
        if is_accelerate_available():
            import accelerate

            lowercase__ : Optional[int] = accelerate.__version__

        lowercase__ : List[str] = "not installed"
        if is_xformers_available():
            import xformers

            lowercase__ : Optional[int] = xformers.__version__

        lowercase__ : List[str] = {
            "`diffusers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "PyTorch version (GPU?)": F'''{pt_version} ({pt_cuda_available})''',
            "Huggingface_hub version": hub_version,
            "Transformers version": transformers_version,
            "Accelerate version": accelerate_version,
            "xFormers version": xformers_version,
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n" )
        print(self.format_dict(lowercase_ ) )

        return info

    @staticmethod
    def __UpperCamelCase ( lowercase_ : str ) -> Dict:
        # Render the info dict as "- key: value" lines.
        return "\n".join([F'''- {prop}: {val}''' for prop, val in d.items()] ) + "\n"
| 87 | import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = R'''
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
or scores for each vocabulary token after SoftMax.
kwargs (`Dict[str, Any]`, *optional*):
Additional stopping criteria specific kwargs.
Return:
`bool`. `False` indicates we should continue, `True` indicates we should stop.
'''
class snake_case_ ( __A ):
    """Abstract base: a stopping criterion is called once per generation step
    and returns True to halt generation, False to continue.

    NOTE(review): the decorator argument `lowercase_` is not defined at class
    scope — presumably the input docstring bound to `UpperCamelCase` above.
    """

    @add_start_docstrings(lowercase_ )
    def __call__( self : Optional[Any] , lowercase_ : torch.LongTensor , lowercase_ : torch.FloatTensor , **lowercase_ : List[str] ) -> bool:
        raise NotImplementedError("StoppingCriteria needs to be subclassed" )
class snake_case_ ( __A ):
    """Stop once the generated sequence reaches `max_length` tokens; also warns
    when the model's positional-embedding capacity is about to be exceeded.

    NOTE(review): obfuscated — `__init__` declares two parameters with the same
    name (a SyntaxError) and the bodies read `max_length` /
    `max_position_embeddings` / `cur_len` / `is_done`, names the renamed
    assignments no longer define.
    """

    def __init__( self : Dict , lowercase_ : int , lowercase_ : Optional[int] = None ) -> List[str]:
        lowercase__ : str = max_length
        lowercase__ : Optional[int] = max_position_embeddings

    @add_start_docstrings(lowercase_ )
    def __call__( self : Tuple , lowercase_ : torch.LongTensor , lowercase_ : torch.FloatTensor , **lowercase_ : Union[str, Any] ) -> bool:
        # current sequence length is the trailing dimension of input_ids
        lowercase__ : str = input_ids.shape[-1]
        lowercase__ : Any = cur_len >= self.max_length
        # Warn (once) if generation will run past the positional-embedding limit.
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            logger.warning_once(
                "This is a friendly reminder - the current text generation call will exceed the model's predefined "
                F'''maximum length ({self.max_position_embeddings}). Depending on the model, you may observe '''
                "exceptions, performance degradation, or nothing at all." )
        return is_done
class snake_case_ ( __A ):
    """Deprecated criterion: stop after `max_new_tokens` tokens beyond
    `start_length` (i.e. effective max_length = start_length + max_new_tokens).

    NOTE(review): `warnings.warn`'s second argument should be a Warning
    category (presumably FutureWarning) — `lowercase_` is not defined here.
    """

    def __init__( self : Tuple , lowercase_ : int , lowercase_ : int ) -> List[str]:
        warnings.warn(
            "The class `MaxNewTokensCriteria` is deprecated. "
            F'''Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` '''
            "with `max_length = start_length + max_new_tokens` instead." , lowercase_ , )
        lowercase__ : Optional[int] = start_length
        lowercase__ : str = max_new_tokens
        lowercase__ : Tuple = start_length + max_new_tokens

    @add_start_docstrings(lowercase_ )
    def __call__( self : List[Any] , lowercase_ : torch.LongTensor , lowercase_ : torch.FloatTensor , **lowercase_ : Dict ) -> bool:
        # Halt once the sequence has grown to the combined limit.
        return input_ids.shape[-1] >= self.max_length
class snake_case_ ( __A ):
    """Stop when wall-clock time since `initial_timestamp` exceeds `max_time`
    seconds; the timestamp defaults to construction time."""

    def __init__( self : Tuple , lowercase_ : float , lowercase_ : Optional[float] = None ) -> Dict:
        lowercase__ : List[str] = max_time
        # default the reference point to "now" when no timestamp was supplied
        lowercase__ : Tuple = time.time() if initial_timestamp is None else initial_timestamp

    @add_start_docstrings(lowercase_ )
    def __call__( self : int , lowercase_ : torch.LongTensor , lowercase_ : torch.FloatTensor , **lowercase_ : Union[str, Any] ) -> bool:
        return time.time() - self.initial_timestamp > self.max_time
class snake_case_ ( __A ):
    """A list of stopping criteria: generation halts when ANY member fires.

    NOTE(review): the `isinstance` checks in the property below are garbled —
    `lowercase_` is undefined there; presumably they tested for the
    length-based criterion classes.
    """

    @add_start_docstrings(lowercase_ )
    def __call__( self : str , lowercase_ : torch.LongTensor , lowercase_ : torch.FloatTensor , **lowercase_ : List[str] ) -> bool:
        return any(criteria(lowercase_ , lowercase_ ) for criteria in self )

    @property
    def __UpperCamelCase ( self : Optional[Any] ) -> Optional[int]:
        # Return the max_length of the first length-based criterion, if any.
        for stopping_criterium in self:
            if isinstance(lowercase_ , lowercase_ ):
                return stopping_criterium.max_length
            elif isinstance(lowercase_ , lowercase_ ):
                return stopping_criterium.max_length
        return None
def lowercase_(stopping_criteria, max_length):
    """Return a copy of `stopping_criteria` guaranteed to enforce `max_length`.

    Warns when the criteria already carry a *different* max length; appends a
    length criterion when none is set; otherwise returns the copy unchanged.

    Bug fixes: the original declared both parameters with the identical name
    `_lowerCamelCase` (a SyntaxError) and passed the int `max_length` as
    `warnings.warn`'s category argument, which must be a Warning subclass.
    """
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria)
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter", UserWarning)
    elif stopping_max_length is None:
        # NOTE(review): `MaxLengthCriteria` is not defined under that name in
        # this obfuscated module — presumably the length-criterion class above.
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length))
    return new_stopping_criteria
| 87 | 1 |
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase = '''▁'''
UpperCamelCase = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class snake_case_ ( __A ,unittest.TestCase ):
__A : Optional[int] = BigBirdTokenizer
__A : Any = BigBirdTokenizerFast
__A : Dict = True
__A : Optional[Any] = True
def __UpperCamelCase ( self : Union[str, Any] ) -> Optional[Any]:
super().setUp()
lowercase__ : Union[str, Any] = self.tokenizer_class(lowercase_ , keep_accents=lowercase_ )
tokenizer.save_pretrained(self.tmpdirname )
def __UpperCamelCase ( self : Tuple ) -> List[Any]:
lowercase__ : List[Any] = "<s>"
lowercase__ : Optional[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase_ ) , lowercase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase_ ) , lowercase_ )
def __UpperCamelCase ( self : Optional[Any] ) -> int:
lowercase__ : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<unk>" )
self.assertEqual(vocab_keys[1] , "<s>" )
self.assertEqual(vocab_keys[-1] , "[MASK]" )
self.assertEqual(len(lowercase_ ) , 10_04 )
def __UpperCamelCase ( self : List[Any] ) -> str:
self.assertEqual(self.get_tokenizer().vocab_size , 10_00 )
def __UpperCamelCase ( self : Optional[int] ) -> Tuple:
if not self.test_rust_tokenizer:
return
lowercase__ : Dict = self.get_tokenizer()
lowercase__ : Any = self.get_rust_tokenizer()
lowercase__ : str = "I was born in 92000, and this is falsé."
lowercase__ : Optional[int] = tokenizer.tokenize(lowercase_ )
lowercase__ : Any = rust_tokenizer.tokenize(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
lowercase__ : List[str] = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
lowercase__ : List[str] = rust_tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
lowercase__ : str = self.get_rust_tokenizer()
lowercase__ : Union[str, Any] = tokenizer.encode(lowercase_ )
lowercase__ : Optional[Any] = rust_tokenizer.encode(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
def __UpperCamelCase ( self : Dict ) -> str:
lowercase__ : Optional[Any] = BigBirdTokenizer(lowercase_ , keep_accents=lowercase_ )
lowercase__ : List[str] = tokenizer.tokenize("This is a test" )
self.assertListEqual(lowercase_ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowercase_ ) , [2_85, 46, 10, 1_70, 3_82] , )
lowercase__ : Dict = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
lowercase_ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
lowercase__ : Tuple = tokenizer.convert_tokens_to_ids(lowercase_ )
self.assertListEqual(
lowercase_ , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
lowercase__ : Tuple = tokenizer.convert_ids_to_tokens(lowercase_ )
self.assertListEqual(
lowercase_ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
def __UpperCamelCase ( self : List[Any] ) -> Optional[int]:
return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base" )
@slow
def __UpperCamelCase ( self : Optional[int] ) -> int:
lowercase__ : Optional[Any] = "Hello World!"
lowercase__ : Optional[int] = [65, 1_85_36, 22_60, 1_01, 66]
self.assertListEqual(lowercase_ , self.big_tokenizer.encode(lowercase_ ) )
@slow
def __UpperCamelCase ( self : Tuple ) -> Optional[Any]:
lowercase__ : Tuple = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
# fmt: off
lowercase__ : Optional[Any] = [65, 8_71, 4_19, 3_58, 9_46, 9_91, 25_21, 4_52, 3_58, 13_57, 3_87, 77_51, 35_36, 1_12, 9_85, 4_56, 1_26, 8_65, 9_38, 54_00, 57_34, 4_58, 13_68, 4_67, 7_86, 24_62, 52_46, 11_59, 6_33, 8_65, 45_19, 4_57, 5_82, 8_52, 25_57, 4_27, 9_16, 5_08, 4_05, 3_43_24, 4_97, 3_91, 4_08, 1_13_42, 12_44, 3_85, 1_00, 9_38, 9_85, 4_56, 5_74, 3_62, 1_25_97, 32_00, 31_29, 11_72, 66] # noqa: E231
# fmt: on
self.assertListEqual(lowercase_ , self.big_tokenizer.encode(lowercase_ ) )
@require_torch
@slow
def __UpperCamelCase ( self : List[Any] ) -> int:
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
lowercase__ : Optional[int] = list(self.big_tokenizer.get_vocab().keys() )[:10]
lowercase__ : str = " ".join(lowercase_ )
lowercase__ : List[str] = self.big_tokenizer.encode_plus(lowercase_ , return_tensors="pt" , return_token_type_ids=lowercase_ )
lowercase__ : int = self.big_tokenizer.batch_encode_plus(
[sequence + " " + sequence] , return_tensors="pt" , return_token_type_ids=lowercase_ )
lowercase__ : Optional[int] = BigBirdConfig(attention_type="original_full" )
lowercase__ : List[str] = BigBirdModel(lowercase_ )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**lowercase_ )
model(**lowercase_ )
@slow
def __UpperCamelCase ( self : str ) -> Optional[int]:
lowercase__ : List[Any] = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base" )
lowercase__ : str = tokenizer.decode(tokenizer("Paris is the [MASK]." ).input_ids )
self.assertTrue(decoded_text == "[CLS] Paris is the[MASK].[SEP]" )
@slow
def __UpperCamelCase ( self : Optional[int] ) -> Optional[Any]:
# fmt: off
lowercase__ : str = {"input_ids": [[65, 3_92_86, 4_58, 3_63_35, 20_01, 4_56, 1_30_73, 1_32_66, 4_55, 1_13, 77_46, 17_41, 1_11_57, 3_91, 1_30_73, 1_32_66, 4_55, 1_13, 39_67, 3_54_12, 1_13, 49_36, 1_09, 38_70, 23_77, 1_13, 3_00_84, 4_57_20, 4_58, 1_34, 1_74_96, 1_12, 5_03, 1_16_72, 1_13, 1_18, 1_12, 56_65, 1_33_47, 3_86_87, 1_12, 14_96, 3_13_89, 1_12, 32_68, 4_72_64, 1_34, 9_62, 1_12, 1_63_77, 80_35, 2_31_30, 4_30, 1_21_69, 1_55_18, 2_85_92, 4_58, 1_46, 4_16_97, 1_09, 3_91, 1_21_69, 1_55_18, 1_66_89, 4_58, 1_46, 4_13_58, 1_09, 4_52, 7_26, 40_34, 1_11, 7_63, 3_54_12, 50_82, 3_88, 19_03, 1_11, 90_51, 3_91, 28_70, 4_89_18, 19_00, 11_23, 5_50, 9_98, 1_12, 95_86, 1_59_85, 4_55, 3_91, 4_10, 2_29_55, 3_76_36, 1_14, 66], [65, 4_48, 1_74_96, 4_19, 36_63, 3_85, 7_63, 1_13, 2_75_33, 28_70, 32_83, 1_30_43, 16_39, 2_47_13, 5_23, 6_56, 2_40_13, 1_85_50, 25_21, 5_17, 2_70_14, 2_12_44, 4_20, 12_12, 14_65, 3_91, 9_27, 48_33, 3_88, 5_78, 1_17_86, 1_14, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 4_84, 21_69, 76_87, 2_19_32, 1_81_46, 7_26, 3_63, 1_70_32, 33_91, 1_14, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowercase_ , model_name="google/bigbird-roberta-base" , revision="215c99f1600e06f83acce68422f2035b2b5c3510" , )
| 87 | from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atomaa_masks(protein : Dict[str, torch.Tensor]):
    """Add atom14 <-> atom37 index maps and existence masks to ``protein``.

    Restores the OpenFold ``make_atom14_masks`` logic: the obfuscated version
    bound every intermediate to the same local (NameError on first use), never
    wrote the computed maps back into the dict before returning it, and used
    the non-existent ``torch.intaa`` / ``torch.floataa`` dtypes. Renamed so the
    numpy wrapper below can call it.
    NOTE(review): output key names and the ``rc.*`` attribute names are taken
    from upstream OpenFold — confirm against this project's ``residue_constants``.
    """
    restype_atomaa_to_atomaa_list = []  # per restype: atom14 slot -> atom37 index
    restype_atomaa_to_atomaa_rev_list = []  # per restype: atom37 index -> atom14 slot
    restype_atomaa_mask_list = []  # per restype: which of the 14 slots exist

    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
        restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        # Fix: originally enumerated the whole protein dict instead of atom_names.
        atom_name_to_idxaa = {name: i for i, name in enumerate(atom_names)}
        restype_atomaa_to_atomaa_rev_list.append(
            [(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types])
        restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names])

    # Add dummy mapping for restype 'UNK'
    restype_atomaa_to_atomaa_list.append([0] * 14)
    restype_atomaa_to_atomaa_rev_list.append([0] * 37)
    restype_atomaa_mask_list.append([0.0] * 14)

    device = protein["aatype"].device
    restype_atomaa_to_atomaa = torch.tensor(restype_atomaa_to_atomaa_list , dtype=torch.int32 , device=device)
    restype_atomaa_to_atomaa_rev = torch.tensor(restype_atomaa_to_atomaa_rev_list , dtype=torch.int32 , device=device)
    restype_atomaa_mask = torch.tensor(restype_atomaa_mask_list , dtype=torch.float32 , device=device)
    protein_aatype = protein["aatype"].to(torch.long)

    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atomaa_to_atomaa = restype_atomaa_to_atomaa[protein_aatype]
    residx_atomaa_mask = restype_atomaa_mask[protein_aatype]

    protein["atom14_atom_exists"] = residx_atomaa_mask
    protein["residx_atom14_to_atom37"] = residx_atomaa_to_atomaa.long()

    # create the gather indices for mapping back
    residx_atomaa_to_atomaa_rev = restype_atomaa_to_atomaa_rev[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atomaa_to_atomaa_rev.long()

    # create the corresponding (num_restypes, 37) existence mask
    restype_atomaa_mask_37 = torch.zeros([21, 37] , dtype=torch.float32 , device=device)
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_atoa[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atomaa_mask_37[restype, atom_type] = 1

    protein["atom37_atom_exists"] = restype_atomaa_mask_37[protein_aatype]

    return protein
def lowercase_ ( batch : Dict[str, torch.Tensor]):
    """Numpy front-end for the atom-mask builder above.

    Converts numpy leaves of ``batch`` to torch tensors, builds the
    atom14/atom37 masks, and converts every result back to numpy.
    Fixes: the parameter had been renamed away from ``batch`` while the body
    still read ``batch["aatype"]``, and the result was bound to a throwaway
    local while ``return out`` referenced the undefined ``out``.
    """
    # Lift numpy arrays to torch tensors on the same device as "aatype".
    batch = tree_map(lambda n: torch.tensor(n , device=batch["aatype"].device) , batch , np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t) , make_atomaa_masks(batch))
    return out
| 87 | 1 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
# Module logger; the body of `main` below reads `logger`, but the original
# bound the logger to an unrelated obfuscated name.
logger = logging.getLogger(__name__)
def simple_accuracy(preds, labels):
    """Return the mean of element-wise equality between predictions and labels.

    Fixes: both parameters were named ``_lowerCamelCase`` (duplicate argument
    names are a SyntaxError), and the function is renamed to
    ``simple_accuracy``, the name ``compute_metrics`` in ``main`` calls.
    """
    return (preds == labels).mean()
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer to fine-tune from.

    Fixes: every field had been renamed to the same ``__A`` (so the dataclass
    collapsed to a single field) and defaults pointed at the undefined ``__A``.
    Field names are restored to the attributes ``main`` reads
    (``model_name_or_path``, ``config_name``, ``tokenizer_name``, ``cache_dir``).
    """

    # Required: hub id or local path of the pretrained model.
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} ,)
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to the data the model is trained and evaluated on.

    Fixes: field names collapsed to ``__A`` and the boolean default pointed at
    the undefined ``__A``; names are restored to the attributes ``main`` reads
    (``task_name``, ``data_dir``, ``max_seq_length``, ``overwrite_cache``).
    """

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys() )} )
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."} )
    max_seq_length: int = field(
        default=128 ,metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } ,)
    # NOTE(review): default restored to False (standard for this flag) — confirm.
    overwrite_cache: bool = field(
        default=False ,metadata={"help": "Overwrite the cached training and evaluation sets"} )
def main():
    """Fine-tune and/or evaluate a multiple-choice model.

    Fixes: every local binding had been collapsed to the same obfuscated name,
    leaving ``parser``, ``model_args``, ``trainer``, ... undefined at use; the
    bindings are restored, ``training_args.fpaa`` (no such attribute) becomes
    ``fp16``, and the function is renamed back to ``main`` so the calls at the
    bottom of the file resolve.
    Returns the dict of evaluation results (empty when --do_eval is off).
    """
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
            " --overwrite_output_dir to overcome.")
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1) , training_args.fp16 , )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s" , training_args)
    # Set seed
    set_seed(training_args.seed)
    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path) , config=config , cache_dir=model_args.cache_dir , )
    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=tokenizer , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=tokenizer , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p : EvalPrediction) -> Dict:
        # Accuracy over the argmax of the per-choice logits.
        preds = np.argmax(p.predictions , axis=1)
        return {"acc": simple_accuracy(preds , p.label_ids)}

    # Data collator (pad to a multiple of 8 only under fp16 for tensor-core efficiency)
    data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8) if training_args.fp16 else None
    # Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , data_collator=data_collator , )
    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None)
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)
    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir , "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file , "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info(" %s = %s" , key , value)
                    writer.write("%s = %s\n" % (key, value))
            results.update(result)
    return results
def lowercase_ ( _lowerCamelCase : List[Any]):
    # For xla_spawn (TPUs)
    # Entry point handed to `xla_spawn`; receives the process index and simply
    # delegates to the training entry point.
    # NOTE(review): `main` must resolve to the training function defined above
    # (its name was mangled in this file) — confirm the binding.
    main()
if __name__ == "__main__":
    # Standard CLI entry point.
    main()
| 87 | import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class FlaxBigBirdModelTester(unittest.TestCase):
    """Builds tiny BigBird configs and random inputs for the Flax tests below.

    Fixes: the class name is restored to ``FlaxBigBirdModelTester`` (the test
    class instantiates it by that name), the ``__init__`` parameters were all
    named ``lowercase_`` (duplicate argument names are a SyntaxError), and the
    ``self.*`` attribute assignments — read by ``prepare_config_and_inputs`` —
    had been replaced with throwaway locals.
    """

    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=56,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=2,
        intermediate_size=7,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=2,
        num_random_blocks=3,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks

    def prepare_config_and_inputs(self):
        # Random token ids plus optional attention mask / token type ids, and a
        # small BigBird config mirroring the tester's hyper-parameters.
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )

        config = BigBirdConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        # Same as above, but with the inputs packed the way the common mixin expects.
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_flax
class snake_case_ ( FlaxModelTesterMixin , unittest.TestCase ):
    """Flax BigBird model tests.

    Fixes: the mixin base class and the boolean flags had been mangled to
    ``__A`` (so the ``super().test_*`` calls below could never resolve), the
    test methods were not named ``test_*`` (so the runner would never collect
    them), and ``check_pt_flax_outputs`` had duplicate parameter names (a
    SyntaxError).
    """

    all_model_classes = (
        (
            FlaxBigBirdForCausalLM,
            FlaxBigBirdModel,
            FlaxBigBirdForPreTraining,
            FlaxBigBirdForMaskedLM,
            FlaxBigBirdForMultipleChoice,
            FlaxBigBirdForQuestionAnswering,
            FlaxBigBirdForSequenceClassification,
            FlaxBigBirdForTokenClassification,
        )
        if is_flax_available()
        else ()
    )
    # NOTE(review): the two mangled boolean flags are restored to the names the
    # methods below / the common mixin read — confirm against upstream.
    test_attn_probs = False
    test_mismatched_shapes = False

    def setUp(self):
        self.model_tester = FlaxBigBirdModelTester(self )

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_save_pretrained(self):
        super().test_from_pretrained_save_pretrained()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_with_no_automatic_init(self):
        super().test_from_pretrained_with_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_no_automatic_init(self):
        super().test_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_hidden_states_output(self):
        super().test_hidden_states_output()

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/bigbird-roberta-base" )
            self.assertIsNotNone(model )

    def test_attention_outputs(self):
        # Block-sparse attention returns no attention probabilities, so the
        # generic check only runs when explicitly enabled.
        if self.test_attn_probs:
            super().test_attention_outputs()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                model = model_class(config )

                @jax.jit
                def model_jitted(input_ids , attention_mask=None , **kwargs ):
                    return model(input_ids=input_ids , attention_mask=attention_mask , **kwargs )

                with self.subTest("JIT Enabled" ):
                    jitted_outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest("JIT Disabled" ):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(outputs ) , len(jitted_outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )

    def check_pt_flax_outputs(self , fx_outputs , pt_outputs , model_class , tol=1E-5 , name="outputs" , attributes=None ):
        # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version,
        # an effort was done to return `attention_probs` (yet to be verified).
        if name.startswith("outputs.attentions" ):
            return
        else:
            super().check_pt_flax_outputs(fx_outputs , pt_outputs , model_class , tol , name , attributes )
| 87 | 1 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Lazy-import structure: submodule name -> public symbols it provides.
# Fix: the original bound this dict and the torch-only symbol list to the same
# throwaway module-level name and then handed the undefined `_import_structure`
# (and a plain assignment instead of `sys.modules[__name__]`) to `_LazyModule`;
# the standard lazy-init pattern is restored.
_import_structure = {
    '''configuration_jukebox''': [
        '''JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''JukeboxConfig''',
        '''JukeboxPriorConfig''',
        '''JukeboxVQVAEConfig''',
    ],
    '''tokenization_jukebox''': ['''JukeboxTokenizer'''],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_jukebox'''] = [
        '''JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''JukeboxModel''',
        '''JukeboxPreTrainedModel''',
        '''JukeboxVQVAE''',
        '''JukeboxPrior''',
    ]

if TYPE_CHECKING:
    from .configuration_jukebox import (
        JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
        JukeboxConfig,
        JukeboxPriorConfig,
        JukeboxVQVAEConfig,
    )
    from .tokenization_jukebox import JukeboxTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_jukebox import (
            JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            JukeboxModel,
            JukeboxPreTrainedModel,
            JukeboxPrior,
            JukeboxVQVAE,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


# Lazy-import structure: submodule name -> public symbols it provides.
# Fix: the original bound this dict and each backend-specific symbol list to
# the same throwaway module-level name and then handed the undefined
# `_import_structure` (and a plain assignment instead of
# `sys.modules[__name__]`) to `_LazyModule`; the standard pattern is restored.
_import_structure = {
    '''configuration_groupvit''': [
        '''GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''GroupViTConfig''',
        '''GroupViTOnnxConfig''',
        '''GroupViTTextConfig''',
        '''GroupViTVisionConfig''',
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_groupvit'''] = [
        '''GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''GroupViTModel''',
        '''GroupViTPreTrainedModel''',
        '''GroupViTTextModel''',
        '''GroupViTVisionModel''',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_tf_groupvit'''] = [
        '''TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFGroupViTModel''',
        '''TFGroupViTPreTrainedModel''',
        '''TFGroupViTTextModel''',
        '''TFGroupViTVisionModel''',
    ]

if TYPE_CHECKING:
    from .configuration_groupvit import (
        GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        GroupViTConfig,
        GroupViTOnnxConfig,
        GroupViTTextConfig,
        GroupViTVisionConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_groupvit import (
            GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GroupViTModel,
            GroupViTPreTrainedModel,
            GroupViTTextModel,
            GroupViTVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_groupvit import (
            TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFGroupViTModel,
            TFGroupViTPreTrainedModel,
            TFGroupViTTextModel,
            TFGroupViTVisionModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 87 | 1 |
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import fa_score
import datasets
# Metric metadata strings.
# Fix: all three were assigned to the same throwaway name while the Metric
# class below reads `_CITATION`, `_DESCRIPTION` and `_KWARGS_DESCRIPTION`.
_CITATION = '''\
@inproceedings{kakwani2020indicnlpsuite,
    title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},
    author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},
    year={2020},
    booktitle={Findings of EMNLP},
}
'''

_DESCRIPTION = '''\
    IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide
    variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.
'''

_KWARGS_DESCRIPTION = '''
Compute IndicGLUE evaluation metric associated to each IndicGLUE dataset.
Args:
    predictions: list of predictions to score (as int64),
        except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32).
    references: list of ground truth labels corresponding to the predictions (as int64),
        except for \'cvit-mkb-clsr\' where each reference is a vector (of float32).
Returns: depending on the IndicGLUE subset, one or several of:
    "accuracy": Accuracy
    "f1": F1 score
    "precision": Precision@10
Examples:

    >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\')  # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {\'accuracy\': 1.0}

    >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\')
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {\'accuracy\': 1.0, \'f1\': 1.0}

    >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\')
    >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
    >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {\'precision@10\': 1.0}

'''
def simple_accuracy(preds, labels):
    """Fraction of predictions equal to the references, as a Python float.

    Fixes: both parameters were named ``_lowerCamelCase`` (duplicate argument
    names are a SyntaxError) and the function is renamed to
    ``simple_accuracy``, the name the Metric class below calls.
    """
    return float((preds == labels).mean())
def acc_and_fa(preds, labels):
    """Return both accuracy and binary F1 for integer predictions.

    Fixes: duplicate ``_lowerCamelCase`` parameter names (SyntaxError) and the
    undefined ``simple_accuracy`` call target; renamed to ``acc_and_fa``, the
    name the Metric class below calls.
    """
    acc = simple_accuracy(preds, labels)
    # NOTE(review): sklearn convention y_true=references, y_pred=predictions
    # restored — the obfuscated call passed the same name for both.
    fa = float(fa_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": fa,
    }
def precision_at_aa(en_sentvecs, in_sentvecs):
    """Precision@10 for cross-lingual sentence retrieval.

    For every English sentence vector, the matching Indic vector (same row
    index) must appear among the 10 nearest neighbours by cosine distance
    after mean-centering each side. Fixes: duplicate parameter names
    (SyntaxError); renamed to ``precision_at_aa``, the name the Metric class
    below calls.
    """
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]
    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)
    sim = cdist(en_sentvecs, in_sentvecs, "cosine")
    actual = np.array(range(n))
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class snake_case_ ( datasets.Metric ):
    """IndicGLUE metric: accuracy, accuracy+F1, or precision@10 per subset."""

    def _info(self):
        # `datasets.Metric` looks up the `_info` hook by name; the mangled
        # `__UpperCamelCase` name would never have been invoked.
        if self.config_name not in [
            "wnli",
            "copa",
            "sna",
            "csqa",
            "wstp",
            "inltkh",
            "bbca",
            "cvit-mkb-clsr",
            "iitp-mr",
            "iitp-pr",
            "actsa-sc",
            "md",
            "wiki-ner",
        ]:
            raise KeyError(
                "You should supply a configuration name selected in "
                "[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", "
                "\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", "
                "\"wiki-ner\"]" )
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    "predictions": datasets.Value("int64" )
                    if self.config_name != "cvit-mkb-clsr"
                    else datasets.Sequence(datasets.Value("float32" ) ),
                    "references": datasets.Value("int64" )
                    if self.config_name != "cvit-mkb-clsr"
                    else datasets.Sequence(datasets.Value("float32" ) ),
                } ) , codebase_urls=[] , reference_urls=[] , format="numpy" if self.config_name != "cvit-mkb-clsr" else None , )

    def _compute(self, predictions, references):
        # Fix: both parameters were named `lowercase_` (duplicate argument ->
        # SyntaxError); restored to the names `datasets.Metric.compute` passes,
        # and the method renamed to the `_compute` hook.
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_aa(predictions , references )}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_fa(predictions , references )
        elif self.config_name in [
            "wnli",
            "copa",
            "sna",
            "csqa",
            "wstp",
            "inltkh",
            "bbca",
            "iitp-mr",
            "iitp-pr",
            "actsa-sc",
            "md",
        ]:
            return {"accuracy": simple_accuracy(predictions , references )}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                "[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", "
                "\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", "
                "\"wiki-ner\"]" )
| 87 | import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_json_dataset(dataset, expected_features):
    """Shared assertions: the loaded Dataset has the fixture's rows/columns/dtypes.

    Fixes: duplicate ``_lowerCamelCase`` parameter names (SyntaxError) and
    ``isinstance(dataset, dataset)`` — the second argument must be the
    ``Dataset`` type. Renamed to the name the tests below call.
    """
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True])
def test_dataset_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    """Reading a jsonl fixture honours keep_in_memory and yields the expected schema.

    Fixes: all three parameters were named ``_lowerCamelCase`` (SyntaxError) —
    the body already read ``tmp_path`` and ``keep_in_memory``, grounding the
    restored fixture names — and the function is renamed ``test_*`` so pytest
    collects it.
    """
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader(jsonl_path , cache_dir=cache_dir , keep_in_memory=keep_in_memory).read()
    _check_json_dataset(dataset , expected_features)
@pytest.mark.parametrize(
    "features" , [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ] , )
def test_dataset_from_json_features(features, jsonl_path, tmp_path):
    """Explicit `features` (or None for inferred) must be reflected in the dataset.

    Fixes: duplicate parameter names (SyntaxError; restored fixture names),
    ``Value(_lowerCamelCase)`` now receives the iterated ``dtype``, and the
    function is renamed ``test_*`` so pytest collects it.
    """
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_path , features=features , cache_dir=cache_dir).read()
    _check_json_dataset(dataset , expected_features)
@pytest.mark.parametrize(
    "features" , [
        None,
        {"col_3": "float64", "col_1": "string", "col_2": "int64"},
    ] , )
def test_dataset_from_json_with_unsorted_column_names(features, jsonl_312_path, tmp_path):
    """Columns come back in file order even when not alphabetically sorted.

    Fixes: duplicate parameter names (SyntaxError), ``isinstance(dataset,
    dataset)`` -> ``isinstance(dataset, Dataset)``, ``Value(dtype)`` receives
    the iterated dtype, and the function is renamed ``test_*``.
    """
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_312_path , features=features , cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
def test_dataset_from_json_with_mismatched_features(jsonl_312_path, tmp_path):
    """Requested feature order may differ from the file's column order.

    Fixes: duplicate parameter names (SyntaxError; fixture names restored),
    ``isinstance(dataset, Dataset)``, ``Value(dtype)``, and a pytest-collectable
    ``test_*`` name.
    """
    # jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    features = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
    expected_features = features.copy()
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    cache_dir = tmp_path / "cache"
    dataset = JsonDatasetReader(jsonl_312_path , features=features , cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("split" , [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_json_split(split, jsonl_path, tmp_path):
    """The requested split name must round-trip (defaulting to "train").

    Fixes: duplicate parameter names (SyntaxError; fixture names restored), a
    ``test_*`` name, and the final assertion's operator precedence —
    ``a == b if c else "train"`` parsed as ``(a == b) if c else "train"``,
    making it vacuously pass whenever split was None.
    """
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(jsonl_path , cache_dir=cache_dir , split=split).read()
    _check_json_dataset(dataset , expected_features)
    assert dataset.split == (split if split else "train")
@pytest.mark.parametrize("path_type" , [str, list])
def test_dataset_from_json_path_type(path_type, jsonl_path, tmp_path):
    """A single path or a list of paths must both load the fixture.

    Fixes: ``issubclass(x, x)`` is vacuously true — the parametrized types
    ``str`` / ``list`` are the intended second arguments — plus duplicate
    parameter names (SyntaxError) and a pytest-collectable ``test_*`` name.
    """
    if issubclass(path_type , str):
        path = jsonl_path
    elif issubclass(path_type , list):
        path = [jsonl_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path , cache_dir=cache_dir).read()
    _check_json_dataset(dataset , expected_features)
def lowercase_ ( _lowerCamelCase : List[Any] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[int]=("train",)):
    """Shared assertion helper: validate each split of a JSON-backed DatasetDict.

    NOTE(review): obfuscated copy of `_check_json_datasetdict` — duplicate
    parameter names (SyntaxError) and unbound `splits`/`dataset_dict`/
    `expected_features`; original was presumably
    (dataset_dict, expected_features, splits=("train",)).
    """
    assert isinstance(_lowerCamelCase , _lowerCamelCase)
    for split in splits:
        lowercase__ : Optional[Any] = dataset_dict[split]
        # the shared JSON fixture has 4 rows / 3 columns
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True])
def lowercase_ ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : str , _lowerCamelCase : str):
    """`keep_in_memory=True` must load into RAM; `False` must memory-map from cache.

    NOTE(review): obfuscated — duplicate parameter names (SyntaxError) and
    unbound `keep_in_memory`/`jsonl_path`/`tmp_path`.
    """
    lowercase__ : List[str] = tmp_path / "cache"
    lowercase__ : str = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    # the context manager asserts on Arrow memory growth depending on the flag
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        lowercase__ : Optional[Any] = JsonDatasetReader({"train": jsonl_path} , cache_dir=_lowerCamelCase , keep_in_memory=_lowerCamelCase).read()
    _check_json_datasetdict(_lowerCamelCase , _lowerCamelCase)
@pytest.mark.parametrize(
    "features" , [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ] , )
def lowercase_ ( _lowerCamelCase : Any , _lowerCamelCase : List[str] , _lowerCamelCase : List[str]):
    """Reading into a DatasetDict with an explicit (or default) feature schema.

    NOTE(review): obfuscated — duplicate parameter names (SyntaxError) and
    unbound `features`/`jsonl_path`/`tmp_path` in the body.
    """
    lowercase__ : str = tmp_path / "cache"
    # when no schema is given the reader must infer these dtypes
    lowercase__ : Tuple = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    lowercase__ : Tuple = features.copy() if features else default_expected_features
    lowercase__ : Union[str, Any] = (
        Features({feature: Value(_lowerCamelCase) for feature, dtype in features.items()}) if features is not None else None
    )
    lowercase__ : Tuple = JsonDatasetReader({"train": jsonl_path} , features=_lowerCamelCase , cache_dir=_lowerCamelCase).read()
    _check_json_datasetdict(_lowerCamelCase , _lowerCamelCase)
@pytest.mark.parametrize("split" , [None, NamedSplit("train"), "train", "test"])
def lowercase_ ( _lowerCamelCase : str , _lowerCamelCase : Dict , _lowerCamelCase : Tuple):
    """Reading a {split: path} mapping must yield a DatasetDict keyed by those splits.

    NOTE(review): obfuscated — duplicate parameter names (SyntaxError) and
    unbound `split`/`jsonl_path`/`tmp_path`/`path` in the body; the two
    assignments in the `else` branch originally bound distinct names
    (`split = "train"` and the path mapping).
    """
    if split:
        lowercase__ : Tuple = {split: jsonl_path}
    else:
        # no explicit split: default to "train" and feed both splits
        lowercase__ : Tuple = "train"
        lowercase__ : int = {"train": jsonl_path, "test": jsonl_path}
    lowercase__ : Dict = tmp_path / "cache"
    lowercase__ : Union[str, Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    lowercase__ : Union[str, Any] = JsonDatasetReader(_lowerCamelCase , cache_dir=_lowerCamelCase).read()
    _check_json_datasetdict(_lowerCamelCase , _lowerCamelCase , splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def lowercase_ ( _lowerCamelCase : Union[str, Any]):
return json.load(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Optional[int]):
return [json.loads(_lowerCamelCase) for line in buffer]
class snake_case_ :
    """Round-trip tests for `JsonDatasetWriter` (obfuscated copy of the upstream
    `datasets` writer test class).

    NOTE(review): every method was renamed to `__UpperCamelCase` and several
    signatures repeat the parameter name `lowercase_` (a SyntaxError as
    written). Bodies read names that are no longer bound here (`buffer`,
    `exported_content`, `keys`, `container`, `len_at`, `extension`,
    `shared_datadir`, `tmp_path_factory`, ...) — originally pytest fixtures /
    parametrized arguments. Kept byte-identical.
    """
    @pytest.mark.parametrize("lines, load_json_function" , [(True, load_json_lines), (False, load_json)] )
    def __UpperCamelCase ( self : List[Any] , lowercase_ : Any , lowercase_ : Optional[Any] , lowercase_ : Dict ) -> Optional[Any]:
        # Write to an in-memory buffer, then read back with the matching loader.
        with io.BytesIO() as buffer:
            JsonDatasetWriter(lowercase_ , lowercase_ , lines=lowercase_ ).write()
            buffer.seek(0 )
            lowercase__ : Optional[int] = load_json_function(lowercase_ )
        assert isinstance(lowercase_ , lowercase_ )
        assert isinstance(exported_content[0] , lowercase_ )
        assert len(lowercase_ ) == 10
    @pytest.mark.parametrize(
        "orient, container, keys, len_at" , [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789" ), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ] , )
    def __UpperCamelCase ( self : str , lowercase_ : int , lowercase_ : str , lowercase_ : Union[str, Any] , lowercase_ : Dict , lowercase_ : Tuple ) -> List[str]:
        # Exercise every pandas-style `orient` layout of the JSON writer.
        with io.BytesIO() as buffer:
            JsonDatasetWriter(lowercase_ , lowercase_ , lines=lowercase_ , orient=lowercase_ ).write()
            buffer.seek(0 )
            lowercase__ : str = load_json(lowercase_ )
        assert isinstance(lowercase_ , lowercase_ )
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(lowercase_ , "keys" ) and not hasattr(exported_content[0] , "keys" )
        if len_at:
            assert len(exported_content[len_at] ) == 10
        else:
            assert len(lowercase_ ) == 10
    @pytest.mark.parametrize("lines, load_json_function" , [(True, load_json_lines), (False, load_json)] )
    def __UpperCamelCase ( self : List[Any] , lowercase_ : int , lowercase_ : Optional[Any] , lowercase_ : Dict ) -> Optional[int]:
        # Same round trip, but with multiprocessing enabled (num_proc=2).
        with io.BytesIO() as buffer:
            JsonDatasetWriter(lowercase_ , lowercase_ , lines=lowercase_ , num_proc=2 ).write()
            buffer.seek(0 )
            lowercase__ : str = load_json_function(lowercase_ )
        assert isinstance(lowercase_ , lowercase_ )
        assert isinstance(exported_content[0] , lowercase_ )
        assert len(lowercase_ ) == 10
    @pytest.mark.parametrize(
        "orient, container, keys, len_at" , [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789" ), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ] , )
    def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : Dict , lowercase_ : Dict , lowercase_ : Optional[int] , lowercase_ : Optional[Any] , lowercase_ : Dict ) -> Any:
        # `orient` layouts again, with multiprocessing enabled.
        with io.BytesIO() as buffer:
            JsonDatasetWriter(lowercase_ , lowercase_ , lines=lowercase_ , orient=lowercase_ , num_proc=2 ).write()
            buffer.seek(0 )
            lowercase__ : Optional[Any] = load_json(lowercase_ )
        assert isinstance(lowercase_ , lowercase_ )
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(lowercase_ , "keys" ) and not hasattr(exported_content[0] , "keys" )
        if len_at:
            assert len(exported_content[len_at] ) == 10
        else:
            assert len(lowercase_ ) == 10
    def __UpperCamelCase ( self : Dict , lowercase_ : List[str] ) -> str:
        # num_proc=0 is invalid and must raise at construction time.
        with pytest.raises(lowercase_ ):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(lowercase_ , lowercase_ , num_proc=0 )
    @pytest.mark.parametrize("compression, extension" , [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")] )
    def __UpperCamelCase ( self : List[Any] , lowercase_ : Tuple , lowercase_ : List[str] , lowercase_ : Optional[Any] , lowercase_ : str , lowercase_ : List[Any] ) -> Any:
        # Compressed output must round-trip byte-identically against the
        # pre-generated reference file (decompressed via fsspec "infer").
        lowercase__ : Dict = tmp_path_factory.mktemp("data" ) / F'''test.json.{extension}'''
        lowercase__ : Optional[int] = str(shared_datadir / F'''test_file.json.{extension}''' )
        JsonDatasetWriter(lowercase_ , lowercase_ , compression=lowercase_ ).write()
        with fsspec.open(lowercase_ , "rb" , compression="infer" ) as f:
            lowercase__ : List[Any] = f.read()
        with fsspec.open(lowercase_ , "rb" , compression="infer" ) as f:
            lowercase__ : str = f.read()
        assert exported_content == original_content
| 87 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy import structure for the BridgeTower model: maps submodule name ->
# public symbols, consumed by `_LazyModule` at the bottom of the file.
UpperCamelCase = {
    '''configuration_bridgetower''': [
        '''BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''BridgeTowerConfig''',
        '''BridgeTowerTextConfig''',
        '''BridgeTowerVisionConfig''',
    ],
    '''processing_bridgetower''': ['''BridgeTowerProcessor'''],
}
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # vision extras missing: simply skip the image-processor entry
    pass
else:
    # Bug fix: the original rebound `UpperCamelCase` to a bare list here,
    # clobbering the import-structure dict; register the submodule instead.
    UpperCamelCase['''image_processing_bridgetower'''] = ['''BridgeTowerImageProcessor''']
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Same fix for the torch-only modeling entries.
    UpperCamelCase['''modeling_bridgetower'''] = [
        '''BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''BridgeTowerForContrastiveLearning''',
        '''BridgeTowerForImageAndTextRetrieval''',
        '''BridgeTowerForMaskedLM''',
        '''BridgeTowerModel''',
        '''BridgeTowerPreTrainedModel''',
    ]
if TYPE_CHECKING:
    # Static type checkers see the real imports...
    from .configuration_bridgetower import (
        BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BridgeTowerConfig,
        BridgeTowerTextConfig,
        BridgeTowerVisionConfig,
    )
    from .processing_bridgetower import BridgeTowerProcessor
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_bridgetower import BridgeTowerImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bridgetower import (
            BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
            BridgeTowerForContrastiveLearning,
            BridgeTowerForImageAndTextRetrieval,
            BridgeTowerForMaskedLM,
            BridgeTowerModel,
            BridgeTowerPreTrainedModel,
        )
else:
    # ...while at runtime the module is replaced by a lazy proxy.
    import sys
    # Bug fix: the original passed the undefined name `_import_structure`
    # (NameError on first import); pass the structure dict built above.
    UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], UpperCamelCase)
| 87 | import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class snake_case_ ( __A ):
    """Processor coupling a LayoutLMv3 image processor with a LayoutLMv3 tokenizer.

    NOTE(review): machine-obfuscated copy of `LayoutLMv3Processor`. The three
    class attributes were all renamed to `__A` (originally `attributes`,
    `image_processor_class`, `tokenizer_class`), `__init__` and `__call__`
    repeat the parameter name `lowercase_` (a SyntaxError as written), and the
    bodies read names (`kwargs`, `image_processor`, `tokenizer`, `features`,
    `text`, `boxes`, ...) that the mangled signatures no longer bind. Kept
    byte-identical; reconcile with the upstream transformers source.
    """
    __A : Optional[Any] = ["image_processor", "tokenizer"]
    __A : Tuple = "LayoutLMv3ImageProcessor"
    __A : List[Any] = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")
    def __init__( self : Union[str, Any] , lowercase_ : int=None , lowercase_ : str=None , **lowercase_ : Optional[Any] ) -> Optional[int]:
        # Back-compat: accept the deprecated `feature_extractor` kwarg as an
        # alias for `image_processor`.
        lowercase__ : Union[str, Any] = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , lowercase_ , )
            lowercase__ : Optional[int] = kwargs.pop("feature_extractor" )
        lowercase__ : int = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(lowercase_ , lowercase_ )
    def __call__( self : Dict , lowercase_ : Optional[Any] , lowercase_ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , lowercase_ : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , lowercase_ : Union[List[List[int]], List[List[List[int]]]] = None , lowercase_ : Optional[Union[List[int], List[List[int]]]] = None , lowercase_ : bool = True , lowercase_ : Union[bool, str, PaddingStrategy] = False , lowercase_ : Union[bool, str, TruncationStrategy] = None , lowercase_ : Optional[int] = None , lowercase_ : int = 0 , lowercase_ : Optional[int] = None , lowercase_ : Optional[bool] = None , lowercase_ : Optional[bool] = None , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : bool = True , lowercase_ : Optional[Union[str, TensorType]] = None , **lowercase_ : Dict , ) -> BatchEncoding:
        # verify input: OCR mode is mutually exclusive with user-supplied
        # boxes / word labels
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True." )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True." )
        # first, apply the image processor
        lowercase__ : Union[str, Any] = self.image_processor(images=lowercase_ , return_tensors=lowercase_ )
        # second, apply the tokenizer (falling back to OCR'd words/boxes)
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(lowercase_ , lowercase_ ):
                lowercase__ : Optional[Any] = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            lowercase__ : Any = features["words"]
        lowercase__ : Tuple = self.tokenizer(
            text=text if text is not None else features["words"] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["boxes"] , word_labels=lowercase_ , add_special_tokens=lowercase_ , padding=lowercase_ , truncation=lowercase_ , max_length=lowercase_ , stride=lowercase_ , pad_to_multiple_of=lowercase_ , return_token_type_ids=lowercase_ , return_attention_mask=lowercase_ , return_overflowing_tokens=lowercase_ , return_special_tokens_mask=lowercase_ , return_offsets_mapping=lowercase_ , return_length=lowercase_ , verbose=lowercase_ , return_tensors=lowercase_ , **lowercase_ , )
        # add pixel values
        lowercase__ : Optional[int] = features.pop("pixel_values" )
        if return_overflowing_tokens is True:
            lowercase__ : Dict = self.get_overflowing_images(lowercase_ , encoded_inputs["overflow_to_sample_mapping"] )
        lowercase__ : str = images
        return encoded_inputs
    def __UpperCamelCase ( self : List[Any] , lowercase_ : Tuple , lowercase_ : List[Any] ) -> Dict:
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        lowercase__ : Tuple = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx] )
        if len(lowercase_ ) != len(lowercase_ ):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                F''' {len(lowercase_ )} and {len(lowercase_ )}''' )
        return images_with_overflow
    def __UpperCamelCase ( self : int , *lowercase_ : Union[str, Any] , **lowercase_ : List[str] ) -> Union[str, Any]:
        # Forwards everything to the tokenizer's `batch_decode`.
        return self.tokenizer.batch_decode(*lowercase_ , **lowercase_ )
    def __UpperCamelCase ( self : Union[str, Any] , *lowercase_ : str , **lowercase_ : int ) -> Dict:
        # Forwards everything to the tokenizer's `decode`.
        return self.tokenizer.decode(*lowercase_ , **lowercase_ )
    @property
    def __UpperCamelCase ( self : Any ) -> Any:
        # Input names the LayoutLMv3 model expects from this processor.
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]
    @property
    def __UpperCamelCase ( self : Optional[int] ) -> Optional[Any]:
        # Deprecated alias kept for backward compatibility.
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , lowercase_ , )
        return self.image_processor_class
    @property
    def __UpperCamelCase ( self : List[Any] ) -> Tuple:
        # Deprecated alias kept for backward compatibility.
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , lowercase_ , )
        return self.image_processor
| 87 | 1 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
UpperCamelCase = False
class snake_case_ ( unittest.TestCase ):
    # Intentionally empty: placeholder for the fast (non-GPU) test suite of
    # this pipeline; only the nightly GPU class below has tests.
    pass
@nightly
@require_torch_gpu
class snake_case_ ( unittest.TestCase ):
    """Nightly GPU integration tests for `VersatileDiffusionPipeline`.

    NOTE(review): obfuscated copy of a diffusers test class — every method was
    renamed to `__UpperCamelCase` (the first was originally `tearDown`, the
    others `test_*`), so unittest will not discover or invoke them as named.
    Bodies also read names (`pipe`, `generator`, `image`, `tmpdirname`, ...)
    that the mangled assignments no longer bind. Kept byte-identical.
    """
    def __UpperCamelCase ( self : Optional[Any] ) -> Tuple:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def __UpperCamelCase ( self : Any ) -> Union[str, Any]:
        # Save/reload round-trip: dual-guided generation must be bit-stable
        # across `save_pretrained` + `from_pretrained` with the same seed.
        lowercase__ : List[str] = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
        pipe.to(lowercase_ )
        pipe.set_progress_bar_config(disable=lowercase_ )
        lowercase__ : Optional[int] = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
        lowercase__ : Tuple = torch.manual_seed(0 )
        lowercase__ : List[Any] = pipe.dual_guided(
            prompt="first prompt" , image=lowercase_ , text_to_image_strength=0.75 , generator=lowercase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(lowercase_ )
            lowercase__ : List[str] = VersatileDiffusionPipeline.from_pretrained(lowercase_ , torch_dtype=torch.floataa )
        pipe.to(lowercase_ )
        pipe.set_progress_bar_config(disable=lowercase_ )
        lowercase__ : Any = generator.manual_seed(0 )
        lowercase__ : Optional[Any] = pipe.dual_guided(
            prompt="first prompt" , image=lowercase_ , text_to_image_strength=0.75 , generator=lowercase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images
        assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
    def __UpperCamelCase ( self : Union[str, Any] ) -> int:
        # End-to-end check of dual-guided, text-to-image and image-variation
        # outputs against hard-coded reference pixel slices.
        lowercase__ : str = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
        pipe.to(lowercase_ )
        pipe.set_progress_bar_config(disable=lowercase_ )
        lowercase__ : Tuple = "cyberpunk 2077"
        lowercase__ : str = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
        lowercase__ : Any = torch.manual_seed(0 )
        lowercase__ : Any = pipe.dual_guided(
            prompt=lowercase_ , image=lowercase_ , text_to_image_strength=0.75 , generator=lowercase_ , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" , ).images
        lowercase__ : Optional[int] = image[0, 2_53:2_56, 2_53:2_56, -1]
        assert image.shape == (1, 5_12, 5_12, 3)
        lowercase__ : Optional[int] = np.array([0.14_48, 0.16_19, 0.17_41, 0.10_86, 0.11_47, 0.11_28, 0.11_99, 0.11_65, 0.10_01] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
        lowercase__ : str = "A painting of a squirrel eating a burger "
        lowercase__ : Any = torch.manual_seed(0 )
        lowercase__ : List[str] = pipe.text_to_image(
            prompt=lowercase_ , generator=lowercase_ , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" ).images
        lowercase__ : Any = image[0, 2_53:2_56, 2_53:2_56, -1]
        assert image.shape == (1, 5_12, 5_12, 3)
        lowercase__ : Optional[int] = np.array([0.33_67, 0.31_69, 0.26_56, 0.38_70, 0.47_90, 0.37_96, 0.40_09, 0.48_78, 0.47_78] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
        lowercase__ : Union[str, Any] = pipe.image_variation(lowercase_ , generator=lowercase_ , output_type="numpy" ).images
        lowercase__ : Optional[Any] = image[0, 2_53:2_56, 2_53:2_56, -1]
        assert image.shape == (1, 5_12, 5_12, 3)
        lowercase__ : int = np.array([0.30_76, 0.31_23, 0.32_84, 0.37_82, 0.37_70, 0.38_94, 0.42_97, 0.43_31, 0.44_56] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
| 87 | from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
UpperCamelCase = logging.get_logger(__name__)
if is_vision_available():
import PIL
class snake_case_ ( __A ):
    """CLIP-style image processor: optional RGB conversion, shortest-edge resize,
    center crop, rescale and normalization, producing a `pixel_values` batch.

    NOTE(review): obfuscated copy (base class and the `model_input_names`-style
    attribute both renamed to `__A`). `__init__` and the batched entry point
    repeat the parameter name `lowercase_` (a SyntaxError as written), and the
    bodies read the original parameter names (`size`, `crop_size`,
    `do_resize`, ...) that the mangled signatures no longer bind. Kept
    byte-identical; reconcile with the upstream transformers source.
    """
    __A : str = ["pixel_values"]
    def __init__( self : int , lowercase_ : bool = True , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = PILImageResampling.BICUBIC , lowercase_ : bool = True , lowercase_ : Dict[str, int] = None , lowercase_ : bool = True , lowercase_ : Union[int, float] = 1 / 2_55 , lowercase_ : bool = True , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : bool = True , **lowercase_ : Union[str, Any] , ) -> None:
        super().__init__(**lowercase_ )
        # Defaults: shortest edge 224 for resize, 224x224 center crop,
        # OpenAI CLIP mean/std for normalization.
        lowercase__ : Tuple = size if size is not None else {"shortest_edge": 2_24}
        lowercase__ : Tuple = get_size_dict(lowercase_ , default_to_square=lowercase_ )
        lowercase__ : List[str] = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
        lowercase__ : Tuple = get_size_dict(lowercase_ , default_to_square=lowercase_ , param_name="crop_size" )
        lowercase__ : Dict = do_resize
        lowercase__ : List[Any] = size
        lowercase__ : int = resample
        lowercase__ : Union[str, Any] = do_center_crop
        lowercase__ : Optional[int] = crop_size
        lowercase__ : List[str] = do_rescale
        lowercase__ : int = rescale_factor
        lowercase__ : List[Any] = do_normalize
        lowercase__ : Union[str, Any] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        lowercase__ : str = image_std if image_std is not None else OPENAI_CLIP_STD
        lowercase__ : Dict = do_convert_rgb
    def __UpperCamelCase ( self : Optional[Any] , lowercase_ : np.ndarray , lowercase_ : Dict[str, int] , lowercase_ : PILImageResampling = PILImageResampling.BICUBIC , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Union[str, Any] , ) -> np.ndarray:
        # Resize preserving aspect ratio so the shortest edge matches `size`.
        lowercase__ : str = get_size_dict(lowercase_ , default_to_square=lowercase_ )
        if "shortest_edge" not in size:
            raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
        lowercase__ : Dict = get_resize_output_image_size(lowercase_ , size=size["shortest_edge"] , default_to_square=lowercase_ )
        return resize(lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_ )
    def __UpperCamelCase ( self : int , lowercase_ : np.ndarray , lowercase_ : Dict[str, int] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : int , ) -> np.ndarray:
        # Center-crop to the exact (height, width) given in `size`.
        lowercase__ : Optional[Any] = get_size_dict(lowercase_ )
        if "height" not in size or "width" not in size:
            raise ValueError(F'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
        return center_crop(lowercase_ , size=(size["height"], size["width"]) , data_format=lowercase_ , **lowercase_ )
    def __UpperCamelCase ( self : Optional[Any] , lowercase_ : np.ndarray , lowercase_ : Union[int, float] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Optional[Any] , ) -> Any:
        # Multiply pixel values by `scale` (typically 1/255).
        return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_ )
    def __UpperCamelCase ( self : str , lowercase_ : np.ndarray , lowercase_ : Union[float, List[float]] , lowercase_ : Union[float, List[float]] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : str , ) -> np.ndarray:
        # Channel-wise standardization with the given mean and std.
        return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_ )
    def __UpperCamelCase ( self : Optional[Any] , lowercase_ : ImageInput , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = None , lowercase_ : bool = None , lowercase_ : int = None , lowercase_ : bool = None , lowercase_ : float = None , lowercase_ : bool = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : bool = None , lowercase_ : Optional[Union[str, TensorType]] = None , lowercase_ : Optional[ChannelDimension] = ChannelDimension.FIRST , **lowercase_ : Union[str, Any] , ) -> PIL.Image.Image:
        # Resolve every option against the instance defaults set in __init__.
        lowercase__ : int = do_resize if do_resize is not None else self.do_resize
        lowercase__ : Dict = size if size is not None else self.size
        lowercase__ : List[Any] = get_size_dict(lowercase_ , param_name="size" , default_to_square=lowercase_ )
        lowercase__ : Dict = resample if resample is not None else self.resample
        lowercase__ : int = do_center_crop if do_center_crop is not None else self.do_center_crop
        lowercase__ : Dict = crop_size if crop_size is not None else self.crop_size
        lowercase__ : List[str] = get_size_dict(lowercase_ , param_name="crop_size" , default_to_square=lowercase_ )
        lowercase__ : int = do_rescale if do_rescale is not None else self.do_rescale
        lowercase__ : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor
        lowercase__ : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize
        lowercase__ : int = image_mean if image_mean is not None else self.image_mean
        lowercase__ : List[str] = image_std if image_std is not None else self.image_std
        lowercase__ : Union[str, Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        lowercase__ : Union[str, Any] = make_list_of_images(lowercase_ )
        if not valid_images(lowercase_ ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        # option/argument consistency checks before touching any pixels
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True." )
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            lowercase__ : Dict = [convert_to_rgb(lowercase_ ) for image in images]
        # All transformations expect numpy arrays.
        lowercase__ : Optional[Any] = [to_numpy_array(lowercase_ ) for image in images]
        if do_resize:
            lowercase__ : List[Any] = [self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_ ) for image in images]
        if do_center_crop:
            lowercase__ : int = [self.center_crop(image=lowercase_ , size=lowercase_ ) for image in images]
        if do_rescale:
            lowercase__ : str = [self.rescale(image=lowercase_ , scale=lowercase_ ) for image in images]
        if do_normalize:
            lowercase__ : Optional[int] = [self.normalize(image=lowercase_ , mean=lowercase_ , std=lowercase_ ) for image in images]
        lowercase__ : Optional[Any] = [to_channel_dimension_format(lowercase_ , lowercase_ ) for image in images]
        lowercase__ : List[str] = {"pixel_values": images}
        return BatchFeature(data=lowercase_ , tensor_type=lowercase_ )
| 87 | 1 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/config.json''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/config.json''',
}
class snake_case_ ( __A ):
    """Configuration class for XLNet models (obfuscated copy of `XLNetConfig`).

    NOTE(review): `model_type`, `keys_to_ignore_at_inference` and
    `attribute_map` were all renamed to `__A`, and the property pair at the
    bottom was renamed to `__UpperCamelCase` — so the
    `@max_position_embeddings.setter` decorator references an undefined name
    and this class body raises NameError as written. `__init__` also repeats
    the parameter name `lowercase_` (SyntaxError) and assigns to a single
    mangled local instead of the original attribute names. Kept byte-identical.
    """
    __A : int = "xlnet"
    __A : List[Any] = ["mems"]
    __A : List[str] = {
        "n_token": "vocab_size", # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__( self : Dict , lowercase_ : List[Any]=3_20_00 , lowercase_ : Any=10_24 , lowercase_ : Dict=24 , lowercase_ : Any=16 , lowercase_ : List[Any]=40_96 , lowercase_ : Union[str, Any]="gelu" , lowercase_ : Dict=True , lowercase_ : Dict="bi" , lowercase_ : Optional[int]=0.02 , lowercase_ : Optional[Any]=1E-12 , lowercase_ : Union[str, Any]=0.1 , lowercase_ : Tuple=5_12 , lowercase_ : Union[str, Any]=None , lowercase_ : str=True , lowercase_ : List[Any]=False , lowercase_ : Union[str, Any]=False , lowercase_ : List[str]=-1 , lowercase_ : Optional[int]=False , lowercase_ : str="last" , lowercase_ : List[Any]=True , lowercase_ : Any="tanh" , lowercase_ : Union[str, Any]=0.1 , lowercase_ : int=5 , lowercase_ : str=5 , lowercase_ : str=5 , lowercase_ : Optional[int]=1 , lowercase_ : Dict=2 , **lowercase_ : Union[str, Any] , ) -> int:
        lowercase__ : Optional[int] = vocab_size
        lowercase__ : List[str] = d_model
        lowercase__ : Union[str, Any] = n_layer
        lowercase__ : Any = n_head
        # hidden size must split evenly across attention heads
        if d_model % n_head != 0:
            raise ValueError(F'''\'d_model % n_head\' ({d_model % n_head}) should be equal to 0''' )
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    F'''`d_head` ({kwargs["d_head"]}) should be equal to `d_model // n_head` ({d_model // n_head})''' )
        lowercase__ : List[Any] = d_model // n_head
        lowercase__ : List[str] = ff_activation
        lowercase__ : Optional[Any] = d_inner
        lowercase__ : int = untie_r
        lowercase__ : int = attn_type
        lowercase__ : Dict = initializer_range
        lowercase__ : List[str] = layer_norm_eps
        lowercase__ : List[Any] = dropout
        lowercase__ : Optional[Any] = mem_len
        lowercase__ : Any = reuse_len
        lowercase__ : List[str] = bi_data
        lowercase__ : str = clamp_len
        lowercase__ : Tuple = same_length
        lowercase__ : List[Any] = summary_type
        lowercase__ : str = summary_use_proj
        lowercase__ : List[str] = summary_activation
        lowercase__ : List[str] = summary_last_dropout
        lowercase__ : str = start_n_top
        lowercase__ : List[str] = end_n_top
        lowercase__ : List[Any] = bos_token_id
        lowercase__ : Tuple = pad_token_id
        lowercase__ : Any = eos_token_id
        # deprecated `use_cache` kwarg is mapped onto `use_mems_eval`
        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead." , lowercase_ , )
            lowercase__ : Tuple = kwargs["use_cache"]
        lowercase__ : str = use_mems_eval
        lowercase__ : List[str] = use_mems_train
        super().__init__(pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_ )
    @property
    def __UpperCamelCase ( self : List[Any] ) -> Union[str, Any]:
        # XLNet has no fixed maximum sequence length, hence the -1 sentinel.
        logger.info(F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
        return -1
    @max_position_embeddings.setter
    def __UpperCamelCase ( self : Dict , lowercase_ : List[Any] ) -> Any:
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
| 87 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
# Lazy import structure for GPT-SW3: submodule name -> public symbols,
# populated only when sentencepiece is installed.
UpperCamelCase = {}
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Bug fix: the original rebound `UpperCamelCase` to a bare list here,
    # discarding the structure dict; register the submodule entry instead.
    # NOTE(review): the public name string is '''GPTSw3Tokenizer''' while the
    # obfuscated identifier imported under TYPE_CHECKING is `GPTSwaTokenizer` —
    # confirm both refer to the same class in the tokenizer submodule.
    UpperCamelCase['''tokenization_gpt_swa'''] = ['''GPTSw3Tokenizer''']
if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_swa import GPTSwaTokenizer
else:
    import sys
    # Bug fix: `_import_structure` was never defined in this module
    # (NameError on first import); pass the structure dict built above.
    UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], UpperCamelCase, module_spec=__spec__)
| 87 | 1 |
def lowercase_ ( _lowerCamelCase : str):
    """Return the longest palindromic substring of the input (Manacher's algorithm, O(n)).

    Fixes over the obfuscated original:
    - ``lowercase__ , lowercase__ : List[str] = 0, 0`` annotated a tuple target,
      which is a SyntaxError; the window bounds are now plain assignments.
    - the center loop and the length table were sized by ``len(input)`` instead
      of the interleaved string's length, truncating the scan.
    - empty input no longer raises IndexError (``input_string[-1]``); it
      returns "".
    """
    if not _lowerCamelCase:
        return ""
    # Interleave "|" so even- and odd-length palindromes share one center scheme,
    # e.g. "aba" -> "a|b|a".
    new_input_string = "|".join(_lowerCamelCase)
    n = len(new_input_string)
    left, right = 0, 0          # bounds of the furthest-reaching palindrome seen so far
    length = [1] * n            # palindrome length (in new-string units) centered at i
    start = 0                   # center of the best palindrome
    max_length = 0
    for j in range(n):
        # Mirror trick: inside [left, right] reuse the twin center's answer
        # as a lower bound instead of expanding from scratch.
        k = 1 if j > right else min(length[left + right - j] // 2, right - j + 1)
        while (
            j - k >= 0
            and j + k < n
            and new_input_string[j + k] == new_input_string[j - k]
        ):
            k += 1
        length[j] = 2 * k - 1
        # Extend the known window when this palindrome reaches further right.
        if j + k - 1 > right:
            left = j - k + 1
            right = j + k - 1
        if max_length < length[j]:
            max_length = length[j]
            start = j
    # Strip the "|" separators out of the winning window.
    window = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    return "".join(ch for ch in window if ch != "|")
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 87 | UpperCamelCase = [0, 2, 4, 6, 8]
UpperCamelCase = [1, 3, 5, 7, 9]
def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : list[int] , _lowerCamelCase : int):
    """Recursively count "reversible" numbers by filling digit pairs (Project Euler 145 helper).

    NOTE(review): obfuscated — all four parameters share one name (a
    SyntaxError as written) and the body reads `remaining_length`,
    `remainder`, `digits`, `length`, `result`, `other_parity_digits` and calls
    `reversible_numbers`, none of which are bound here; the original signature
    was presumably (remaining_length, remainder, digits, length) with the
    function itself named `reversible_numbers`. Kept byte-identical.
    """
    # Base case: all digits chosen — verify n + reverse(n) has only odd digits.
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0
        for i in range(length // 2 - 1 , -1 , -1):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10
        return 1
    # Odd length: the middle digit is added to itself, so parity is forced.
    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0
        lowercase__ : str = 0
        for digit in range(10):
            lowercase__ : str = digit
            result += reversible_numbers(
                0 , (remainder + 2 * digit) // 10 , _lowerCamelCase , _lowerCamelCase)
        return result
    # General case: pick an outer/inner digit pair of opposite parity sums.
    lowercase__ : Dict = 0
    for digita in range(10):
        lowercase__ : int = digita
        if (remainder + digita) % 2 == 0:
            lowercase__ : Optional[Any] = ODD_DIGITS
        else:
            lowercase__ : str = EVEN_DIGITS
        for digita in other_parity_digits:
            lowercase__ : List[str] = digita
            result += reversible_numbers(
                remaining_length - 2 , (remainder + digita + digita) // 10 , _lowerCamelCase , _lowerCamelCase , )
    return result
def lowercase_ ( _lowerCamelCase : int = 9):
lowercase__ : Tuple = 0
for length in range(1 , max_power + 1):
result += reversible_numbers(_lowerCamelCase , 0 , [0] * length , _lowerCamelCase)
return result
if __name__ == "__main__":
print(f"{solution() = }")
| 87 | 1 |
import requests
def lowercase_ ( _lowerCamelCase : str , _lowerCamelCase : str):
lowercase__ : List[str] = {"Content-Type": "application/json"}
lowercase__ : List[Any] = requests.post(_lowerCamelCase , json={"text": message_body} , headers=_lowerCamelCase)
if response.status_code != 200:
lowercase__ : int = (
"Request to slack returned an error "
f'''{response.status_code}, the response is:\n{response.text}'''
)
raise ValueError(_lowerCamelCase)
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('''<YOUR MESSAGE BODY>''', '''<SLACK CHANNEL URL>''')
| 87 | import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
UpperCamelCase = '''\
@inproceedings{snover-etal-2006-study,
title = "A Study of Translation Edit Rate with Targeted Human Annotation",
author = "Snover, Matthew and
Dorr, Bonnie and
Schwartz, Rich and
Micciulla, Linnea and
Makhoul, John",
booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",
month = aug # " 8-12",
year = "2006",
address = "Cambridge, Massachusetts, USA",
publisher = "Association for Machine Translation in the Americas",
url = "https://aclanthology.org/2006.amta-papers.25",
pages = "223--231",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
UpperCamelCase = '''\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
'''
UpperCamelCase = '''
Produces TER scores alongside the number of edits and reference length.
Args:
predictions (list of str): The system stream (a sequence of segments).
references (list of list of str): A list of one or more reference streams (each a sequence of segments).
normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
Only applies if `normalized = True`. Defaults to `False`.
case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
\'score\' (float): TER score (num_edits / sum_ref_lengths * 100)
\'num_edits\' (int): The cumulative number of edits
\'ref_length\' (float): The cumulative average reference length
Examples:
Example 1:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}
Example 2:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}
Example 3:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... normalized=True,
... case_sensitive=True)
>>> print(results)
{\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}
Example 4:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}
Example 5:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class snake_case_ ( datasets.Metric ):
def __UpperCamelCase ( self : Union[str, Any] ) -> Tuple:
if version.parse(scb.__version__ ) < version.parse("1.4.12" ):
raise ImportWarning(
"To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
"You can install it with `pip install \"sacrebleu>=1.4.12\"`." )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="http://www.cs.umd.edu/~snover/tercom/" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ),
} ) , codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"] , reference_urls=[
"https://github.com/jhclark/tercom",
] , )
def __UpperCamelCase ( self : Optional[Any] , lowercase_ : str , lowercase_ : Optional[int] , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : bool = False , ) -> Any:
lowercase__ : Optional[int] = len(references[0] )
if any(len(lowercase_ ) != references_per_prediction for refs in references ):
raise ValueError("Sacrebleu requires the same number of references for each prediction" )
lowercase__ : Union[str, Any] = [[refs[i] for refs in references] for i in range(lowercase_ )]
lowercase__ : str = TER(
normalized=lowercase_ , no_punct=lowercase_ , asian_support=lowercase_ , case_sensitive=lowercase_ , )
lowercase__ : List[str] = sb_ter.corpus_score(lowercase_ , lowercase_ )
return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 87 | 1 |
import operator
def lowercase_ ( _lowerCamelCase : list , _lowerCamelCase : bool = False , _lowerCamelCase : list | None = None):
lowercase__ : int = operator.lt if reverse else operator.gt
lowercase__ : str = solution or []
if not arr:
return solution
lowercase__ : List[str] = [arr.pop(0)]
for i, item in enumerate(_lowerCamelCase):
if _operator(_lowerCamelCase , sublist[-1]):
sublist.append(_lowerCamelCase)
arr.pop(_lowerCamelCase)
# merging sublist into solution list
if not solution:
solution.extend(_lowerCamelCase)
else:
while sublist:
lowercase__ : str = sublist.pop(0)
for i, xx in enumerate(_lowerCamelCase):
if not _operator(_lowerCamelCase , _lowerCamelCase):
solution.insert(_lowerCamelCase , _lowerCamelCase)
break
else:
solution.append(_lowerCamelCase)
strand_sort(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
| 87 | def lowercase_ ( _lowerCamelCase : int):
lowercase__ : Dict = n ** (1 / 3)
return (val * val * val) == n
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
| 87 | 1 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class snake_case_ :
def __init__( self : Any , lowercase_ : int , lowercase_ : Dict=13 , lowercase_ : Dict=7 , lowercase_ : Optional[Any]=True , lowercase_ : Tuple=True , lowercase_ : str=True , lowercase_ : List[Any]=True , lowercase_ : Union[str, Any]=99 , lowercase_ : List[Any]=32 , lowercase_ : List[Any]=2 , lowercase_ : Dict=4 , lowercase_ : List[Any]=37 , lowercase_ : Union[str, Any]="gelu" , lowercase_ : Optional[Any]=0.1 , lowercase_ : Tuple=0.1 , lowercase_ : Tuple=5_12 , lowercase_ : List[str]=16 , lowercase_ : Optional[int]=2 , lowercase_ : Optional[Any]=0.02 , lowercase_ : List[Any]=3 , lowercase_ : str=4 , lowercase_ : Any=None , lowercase_ : List[Any]=0 , ) -> Optional[Any]:
lowercase__ : Dict = parent
lowercase__ : int = batch_size
lowercase__ : Optional[int] = seq_length
lowercase__ : int = is_training
lowercase__ : Dict = use_input_mask
lowercase__ : Any = use_token_type_ids
lowercase__ : List[str] = use_labels
lowercase__ : Any = vocab_size
lowercase__ : List[str] = hidden_size
lowercase__ : Dict = num_hidden_layers
lowercase__ : Any = num_attention_heads
lowercase__ : Optional[Any] = intermediate_size
lowercase__ : Dict = hidden_act
lowercase__ : int = hidden_dropout_prob
lowercase__ : Tuple = attention_probs_dropout_prob
lowercase__ : int = max_position_embeddings
lowercase__ : str = type_vocab_size
lowercase__ : Dict = type_sequence_label_size
lowercase__ : List[Any] = initializer_range
lowercase__ : Union[str, Any] = num_labels
lowercase__ : List[Any] = num_choices
lowercase__ : Any = scope
lowercase__ : int = projection_dim
def __UpperCamelCase ( self : Optional[int] ) -> int:
lowercase__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ : Dict = None
if self.use_input_mask:
# follow test_modeling_tf_ctrl.py
lowercase__ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ : List[str] = None
if self.use_token_type_ids:
lowercase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase__ : List[str] = None
lowercase__ : Optional[int] = None
lowercase__ : List[str] = None
if self.use_labels:
lowercase__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase__ : int = ids_tensor([self.batch_size] , self.num_choices )
lowercase__ : Optional[Any] = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase_ , initializer_range=self.initializer_range , )
lowercase__ : str = DPRConfig(projection_dim=self.projection_dim , **config.to_dict() )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCamelCase ( self : List[str] , lowercase_ : List[Any] , lowercase_ : Dict , lowercase_ : int , lowercase_ : List[Any] , lowercase_ : List[Any] , lowercase_ : str , lowercase_ : Optional[Any] ) -> int:
lowercase__ : Optional[Any] = TFDPRContextEncoder(config=lowercase_ )
lowercase__ : Dict = model(lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ )
lowercase__ : Optional[Any] = model(lowercase_ , token_type_ids=lowercase_ )
lowercase__ : Tuple = model(lowercase_ )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def __UpperCamelCase ( self : List[str] , lowercase_ : Optional[int] , lowercase_ : int , lowercase_ : Optional[int] , lowercase_ : Dict , lowercase_ : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : List[Any] ) -> List[Any]:
lowercase__ : int = TFDPRQuestionEncoder(config=lowercase_ )
lowercase__ : List[Any] = model(lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ )
lowercase__ : Tuple = model(lowercase_ , token_type_ids=lowercase_ )
lowercase__ : str = model(lowercase_ )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def __UpperCamelCase ( self : List[Any] , lowercase_ : str , lowercase_ : Tuple , lowercase_ : Optional[Any] , lowercase_ : int , lowercase_ : Any , lowercase_ : Optional[int] , lowercase_ : int ) -> Dict:
lowercase__ : str = TFDPRReader(config=lowercase_ )
lowercase__ : int = model(lowercase_ , attention_mask=lowercase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) )
def __UpperCamelCase ( self : Tuple ) -> Dict:
lowercase__ : Union[str, Any] = self.prepare_config_and_inputs()
(
(
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) ,
) : Optional[Any] = config_and_inputs
lowercase__ : Optional[int] = {"input_ids": input_ids}
return config, inputs_dict
@require_tf
class snake_case_ ( __A ,__A ,unittest.TestCase ):
__A : Union[str, Any] = (
(
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
if is_tf_available()
else ()
)
__A : Optional[Any] = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}
__A : str = False
__A : Any = False
__A : Optional[int] = False
__A : Optional[Any] = False
__A : Tuple = False
def __UpperCamelCase ( self : Any ) -> Tuple:
lowercase__ : Union[str, Any] = TFDPRModelTester(self )
lowercase__ : List[Any] = ConfigTester(self , config_class=lowercase_ , hidden_size=37 )
def __UpperCamelCase ( self : List[str] ) -> List[Any]:
self.config_tester.run_common_tests()
def __UpperCamelCase ( self : Optional[int] ) -> List[Any]:
lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_context_encoder(*lowercase_ )
def __UpperCamelCase ( self : Tuple ) -> Any:
lowercase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_question_encoder(*lowercase_ )
def __UpperCamelCase ( self : Tuple ) -> Any:
lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_reader(*lowercase_ )
@slow
def __UpperCamelCase ( self : str ) -> Any:
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : List[Any] = TFDPRContextEncoder.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Optional[int] = TFDPRContextEncoder.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : List[Any] = TFDPRQuestionEncoder.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : int = TFDPRReader.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
@require_tf
class snake_case_ ( unittest.TestCase ):
@slow
def __UpperCamelCase ( self : Tuple ) -> List[Any]:
lowercase__ : Any = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base" )
lowercase__ : List[str] = tf.constant(
[[1_01, 75_92, 10_10, 20_03, 20_26, 38_99, 1_01_40, 10_29, 1_02]] ) # [CLS] hello, is my dog cute? [SEP]
lowercase__ : List[str] = model(lowercase_ )[0] # embedding shape = (1, 768)
# compare the actual values for a slice.
lowercase__ : Tuple = tf.constant(
[
[
0.03_23_62_53,
0.12_75_33_35,
0.16_81_85_09,
0.00_27_97_86,
0.3_89_69_33,
0.24_26_49_45,
0.2_17_89_71,
-0.02_33_52_27,
-0.08_48_19_59,
-0.14_32_41_17,
]
] )
self.assertTrue(numpy.allclose(output[:, :10].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 87 | from PIL import Image
def lowercase_ ( _lowerCamelCase : Image , _lowerCamelCase : int):
lowercase__ : List[str] = (259 * (level + 255)) / (255 * (259 - level))
def contrast(_lowerCamelCase : int) -> int:
return int(128 + factor * (c - 128))
return img.point(_lowerCamelCase)
if __name__ == "__main__":
# Load image
with Image.open('''image_data/lena.jpg''') as img:
# Change contrast to 170
UpperCamelCase = change_contrast(img, 170)
cont_img.save('''image_data/lena_high_contrast.png''', format='''png''')
| 87 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCamelCase = {'''configuration_yolos''': ['''YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''YolosConfig''', '''YolosOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ['''YolosFeatureExtractor''']
UpperCamelCase = ['''YolosImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'''YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''YolosForObjectDetection''',
'''YolosModel''',
'''YolosPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 87 | from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
UpperCamelCase = TypeVar('''T''')
class snake_case_ ( Generic[T] ):
__A : deque[T] # Cache store of keys
__A : set[T] # References of the keys in cache
__A : int = 10 # Maximum capacity of cache
def __init__( self : Union[str, Any] , lowercase_ : int ) -> None:
lowercase__ : int = deque()
lowercase__ : str = set()
if not n:
lowercase__ : str = sys.maxsize
elif n < 0:
raise ValueError("n should be an integer greater than 0." )
else:
lowercase__ : List[Any] = n
def __UpperCamelCase ( self : Dict , lowercase_ : T ) -> None:
if x not in self.key_reference:
if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
lowercase__ : Dict = self.dq_store.pop()
self.key_reference.remove(lowercase_ )
else:
self.dq_store.remove(lowercase_ )
self.dq_store.appendleft(lowercase_ )
self.key_reference.add(lowercase_ )
def __UpperCamelCase ( self : Dict ) -> None:
for k in self.dq_store:
print(lowercase_ )
def __repr__( self : Optional[int] ) -> str:
return F'''LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}'''
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase = LRUCache(4)
lru_cache.refer('''A''')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('''A''')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 87 | 1 |
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class snake_case_ ( __A ,unittest.TestCase ):
__A : int = RoCBertTokenizer
__A : List[str] = None
__A : Dict = False
__A : Optional[int] = True
__A : List[Any] = filter_non_english
def __UpperCamelCase ( self : Dict ) -> Any:
super().setUp()
lowercase__ : Dict = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
lowercase__ : List[str] = {}
lowercase__ : List[str] = {}
for i, value in enumerate(lowercase_ ):
lowercase__ : Union[str, Any] = i
lowercase__ : Tuple = i
lowercase__ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowercase__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_shape_file"] )
lowercase__ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_pronunciation_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
with open(self.word_shape_file , "w" , encoding="utf-8" ) as word_shape_writer:
json.dump(lowercase_ , lowercase_ , ensure_ascii=lowercase_ )
with open(self.word_pronunciation_file , "w" , encoding="utf-8" ) as word_pronunciation_writer:
json.dump(lowercase_ , lowercase_ , ensure_ascii=lowercase_ )
def __UpperCamelCase ( self : Dict ) -> List[str]:
lowercase__ : str = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
lowercase__ : Any = tokenizer.tokenize("你好[SEP]你是谁" )
self.assertListEqual(lowercase_ , ["你", "好", "[SEP]", "你", "是", "谁"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase_ ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(lowercase_ ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(lowercase_ ) , [5, 6, 2, 5, 7, 8] )
def __UpperCamelCase ( self : List[str] ) -> Optional[Any]:
lowercase__ : List[str] = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )
def __UpperCamelCase ( self : List[str] ) -> Dict:
lowercase__ : Optional[int] = RoCBertBasicTokenizer(do_lower_case=lowercase_ )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __UpperCamelCase ( self : List[str] ) -> Tuple:
lowercase__ : int = RoCBertBasicTokenizer(do_lower_case=lowercase_ , strip_accents=lowercase_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )
def __UpperCamelCase ( self : Dict ) -> List[str]:
lowercase__ : Any = RoCBertBasicTokenizer(do_lower_case=lowercase_ , strip_accents=lowercase_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __UpperCamelCase ( self : Union[str, Any] ) -> Union[str, Any]:
lowercase__ : Any = RoCBertBasicTokenizer(do_lower_case=lowercase_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __UpperCamelCase ( self : Optional[Any] ) -> List[Any]:
lowercase__ : str = RoCBertBasicTokenizer(do_lower_case=lowercase_ )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
def __UpperCamelCase ( self : Optional[int] ) -> List[str]:
lowercase__ : Optional[int] = RoCBertBasicTokenizer(do_lower_case=lowercase_ , strip_accents=lowercase_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )
def __UpperCamelCase ( self : Tuple ) -> Optional[Any]:
lowercase__ : Optional[Any] = RoCBertBasicTokenizer(do_lower_case=lowercase_ , strip_accents=lowercase_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )
def __UpperCamelCase ( self : Union[str, Any] ) -> Tuple:
lowercase__ : Optional[Any] = RoCBertBasicTokenizer(do_lower_case=lowercase_ , never_split=["[UNK]"] )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )
def __UpperCamelCase ( self : Union[str, Any] ) -> Dict:
lowercase__ : Optional[int] = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
lowercase__ : Union[str, Any] = {}
for i, token in enumerate(lowercase_ ):
lowercase__ : Optional[Any] = i
lowercase__ : Dict = RoCBertWordpieceTokenizer(vocab=lowercase_ , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )
def __UpperCamelCase ( self : str ) -> Tuple:
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
def __UpperCamelCase ( self : Dict ) -> Union[str, Any]:
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
def __UpperCamelCase ( self : Dict ) -> int:
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
def __UpperCamelCase ( self : Dict ) -> Any:
lowercase__ : int = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(lowercase_ ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
if self.test_rust_tokenizer:
lowercase__ : Optional[Any] = self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(lowercase_ ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
def __UpperCamelCase ( self : List[str] ) -> int:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowercase__ : Dict = self.rust_tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
lowercase__ : List[Any] = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
lowercase__ : str = tokenizer_r.encode_plus(
lowercase_ , return_attention_mask=lowercase_ , return_token_type_ids=lowercase_ , return_offsets_mapping=lowercase_ , add_special_tokens=lowercase_ , )
lowercase__ : Any = tokenizer_r.do_lower_case if hasattr(lowercase_ , "do_lower_case" ) else False
lowercase__ : Any = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "Allen"),
((21, 23), "##NL"),
((23, 24), "##P"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "allen"),
((21, 23), "##nl"),
((23, 24), "##p"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] )
def __UpperCamelCase ( self : Optional[Any] ) -> Optional[int]:
lowercase__ : Optional[Any] = ["的", "人", "有"]
lowercase__ : Optional[Any] = "".join(lowercase_ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowercase__ : Optional[Any] = True
lowercase__ : str = self.tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
lowercase__ : Any = self.rust_tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
lowercase__ : Optional[int] = tokenizer_p.encode(lowercase_ , add_special_tokens=lowercase_ )
lowercase__ : Optional[int] = tokenizer_r.encode(lowercase_ , add_special_tokens=lowercase_ )
lowercase__ : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(lowercase_ )
lowercase__ : List[Any] = tokenizer_p.convert_ids_to_tokens(lowercase_ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(lowercase_ , lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
lowercase__ : int = False
lowercase__ : Any = self.rust_tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
lowercase__ : Union[str, Any] = self.tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
lowercase__ : Dict = tokenizer_r.encode(lowercase_ , add_special_tokens=lowercase_ )
lowercase__ : List[str] = tokenizer_p.encode(lowercase_ , add_special_tokens=lowercase_ )
lowercase__ : List[Any] = tokenizer_r.convert_ids_to_tokens(lowercase_ )
lowercase__ : str = tokenizer_p.convert_ids_to_tokens(lowercase_ )
# it is expected that only the first Chinese character is not preceded by "##".
lowercase__ : Any = [
F'''##{token}''' if idx != 0 else token for idx, token in enumerate(lowercase_ )
]
self.assertListEqual(lowercase_ , lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
    @slow
    def __UpperCamelCase ( self : Tuple ) -> int:
        # Verify that build_inputs_with_special_tokens brackets a single sequence as
        # [CLS](=1) + ids + [SEP](=2), and a pair as [CLS] + a + [SEP] + b + [SEP].
        lowercase__ : Optional[int] = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
        lowercase__ : Any = tokenizer.encode("你好" , add_special_tokens=lowercase_ )
        lowercase__ : Dict = tokenizer.encode("你是谁" , add_special_tokens=lowercase_ )
        lowercase__ : str = tokenizer.build_inputs_with_special_tokens(lowercase_ )
        lowercase__ : Optional[int] = tokenizer.build_inputs_with_special_tokens(lowercase_ , lowercase_ )
        assert encoded_sentence == [1] + text + [2]
        assert encoded_pair == [1] + text + [2] + text_a + [2]
    def __UpperCamelCase ( self : Union[str, Any] ) -> List[str]:
        # Check that prepare_for_model (given token ids, shape ids and pronunciation
        # ids) produces the same output as the one-shot encode_plus call.
        lowercase__ : List[str] = self.get_tokenizers(do_lower_case=lowercase_ )
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                lowercase__ : int = "你好,你是谁"
                lowercase__ : int = tokenizer.tokenize(lowercase_ )
                lowercase__ : str = tokenizer.convert_tokens_to_ids(lowercase_ )
                # RoC-BERT-style tokenizers carry parallel shape / pronunciation ids.
                lowercase__ : Tuple = tokenizer.convert_tokens_to_shape_ids(lowercase_ )
                lowercase__ : int = tokenizer.convert_tokens_to_pronunciation_ids(lowercase_ )
                lowercase__ : int = tokenizer.prepare_for_model(
                    lowercase_ , lowercase_ , lowercase_ , add_special_tokens=lowercase_ )
                lowercase__ : Any = tokenizer.encode_plus(lowercase_ , add_special_tokens=lowercase_ )
                self.assertEqual(lowercase_ , lowercase_ )
| 87 | from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-level logger for this configuration module.
UpperCamelCase = logging.get_logger(__name__)
# Map of pretrained ConvBERT checkpoint names to their hosted config.json URLs.
# NOTE(review): obfuscation damage — this rebinds the same name (`UpperCamelCase`)
# used for the logger above; originally these were `logger` and
# `CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP`.
UpperCamelCase = {
    '''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json''',
    '''YituTech/conv-bert-medium-small''': (
        '''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'''
    ),
    '''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json''',
    # See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class snake_case_ ( __A ):
    """Configuration class for ConvBERT models (``model_type`` "convbert").

    Stores the hyper-parameters used to instantiate a ConvBERT model. Defaults
    mirror the YituTech/conv-bert-base checkpoint.
    """

    __A : List[str] = "convbert"

    def __init__(
        self,
        vocab_size=3_05_22,
        hidden_size=7_68,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=30_72,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_12,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        embedding_size=7_68,
        head_ratio=2,
        conv_kernel_size=9,
        num_groups=1,
        classifier_dropout=None,
        **kwargs,
    ) -> None:
        # FIX: the obfuscated original declared every parameter as `lowercase_`
        # (a SyntaxError from duplicate argument names) and assigned the values
        # to throwaway locals instead of attributes; restore real names and
        # store each hyper-parameter on the instance.
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs, )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # ConvBERT-specific: factorized embeddings and span-based dynamic convolution.
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout
class snake_case_ ( __A ):
    """ONNX export configuration: declares the dynamic axes of the model inputs."""

    @property
    def __UpperCamelCase ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
        # FIX: the obfuscated original assigned the axis mapping to `lowercase__`
        # but read the undefined name `dynamic_axis` below (NameError at runtime);
        # bind the name actually used.
        if self.task == "multiple-choice":
            # multiple-choice inputs carry an extra "choice" axis.
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ] )
| 87 | 1 |
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def prepare_mam_aaa_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """Build the kwargs dict for an M2M100 forward pass, filling in default masks.

    FIX: the obfuscated original was named ``lowercase_`` with every parameter
    called ``_lowerCamelCase`` (duplicate argument names), yet call sites below
    invoke ``prepare_mam_aaa_inputs_dict`` and the body reads the real names —
    restore them so the function is actually callable.
    """
    if attention_mask is None:
        # Mask out padding positions.
        attention_mask = input_ids.ne(config.pad_token_id)
    if decoder_attention_mask is None:
        decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
    if head_mask is None:
        # Keep all encoder attention heads by default; created on the inputs'
        # device so CPU/GPU runs both work.
        head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=input_ids.device)
    if decoder_head_mask is None:
        decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=input_ids.device)
    if cross_attn_head_mask is None:
        cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=input_ids.device)
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        # NOTE(review): the encoder attention_mask is deliberately reused here
        # (matches the upstream M2M100 test helper) — confirm before changing.
        "decoder_attention_mask": attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
class snake_case_ :
    # Test helper that builds small M2M100 configs/inputs and runs shared checks.
    # NOTE(review): obfuscation damage throughout this class — `__init__` declares
    # duplicate `lowercase_` parameter names (a SyntaxError) and locals are bound
    # to `lowercase__` while later lines read the real names (parent, batch_size,
    # input_ids, ...). Kept byte-identical here; see the restored helper above
    # for the fix pattern.
    def __init__( self : Union[str, Any] , lowercase_ : Any , lowercase_ : Optional[int]=13 , lowercase_ : Any=7 , lowercase_ : Tuple=True , lowercase_ : Union[str, Any]=False , lowercase_ : Any=99 , lowercase_ : Tuple=16 , lowercase_ : int=2 , lowercase_ : List[str]=4 , lowercase_ : List[str]=4 , lowercase_ : Any="relu" , lowercase_ : Optional[Any]=0.1 , lowercase_ : Optional[int]=0.1 , lowercase_ : int=0.0 , lowercase_ : Dict=0.0 , lowercase_ : List[str]=20 , lowercase_ : Dict=2 , lowercase_ : int=1 , lowercase_ : Dict=0 , ) -> str:
        # Store every tiny-model hyper-parameter for later config construction.
        lowercase__ : List[Any] = parent
        lowercase__ : int = batch_size
        lowercase__ : Optional[Any] = seq_length
        lowercase__ : Dict = is_training
        lowercase__ : List[str] = use_labels
        lowercase__ : Dict = vocab_size
        lowercase__ : List[Any] = hidden_size
        lowercase__ : Optional[int] = num_hidden_layers
        lowercase__ : Any = num_attention_heads
        lowercase__ : str = intermediate_size
        lowercase__ : Optional[Any] = hidden_act
        lowercase__ : str = hidden_dropout_prob
        lowercase__ : Union[str, Any] = attention_probs_dropout_prob
        lowercase__ : Optional[Any] = encoder_layerdrop
        lowercase__ : Optional[int] = decoder_layerdrop
        lowercase__ : Dict = max_position_embeddings
        lowercase__ : List[Any] = eos_token_id
        lowercase__ : Dict = pad_token_id
        lowercase__ : Union[str, Any] = bos_token_id
    def __UpperCamelCase ( self : Optional[Any] ) -> int:
        # Build random (input_ids, decoder_input_ids) and the full inputs dict.
        lowercase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowercase__ : Optional[int] = self.eos_token_id  # Eos Token
        lowercase__ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for M2M100 the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_lenth and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        lowercase__ : Union[str, Any] = input_ids.clamp(self.pad_token_id + 1 )
        lowercase__ : Any = decoder_input_ids.clamp(self.pad_token_id + 1 )
        lowercase__ : Tuple = self.get_config()
        lowercase__ : Tuple = prepare_mam_aaa_inputs_dict(lowercase_ , lowercase_ , lowercase_ )
        return config, inputs_dict
    def __UpperCamelCase ( self : Dict ) -> Optional[int]:
        # Tiny M2M100 config matching the tester's hyper-parameters.
        return MaMaaaConfig(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )
    def __UpperCamelCase ( self : List[Any] ) -> Any:
        # Same as prepare_config_and_inputs; kept as the *_for_common entry point.
        lowercase__ , lowercase__ : Tuple = self.prepare_config_and_inputs()
        return config, inputs_dict
    def __UpperCamelCase ( self : int , lowercase_ : List[Any] , lowercase_ : int ) -> Union[str, Any]:
        # Check that decoding with cached past_key_values matches full re-decoding.
        lowercase__ : str = MaMaaaModel(config=lowercase_ ).get_decoder().to(lowercase_ ).eval()
        lowercase__ : Tuple = inputs_dict["input_ids"]
        lowercase__ : List[Any] = inputs_dict["attention_mask"]
        lowercase__ : Any = inputs_dict["head_mask"]
        # first forward pass
        lowercase__ : Optional[int] = model(lowercase_ , attention_mask=lowercase_ , head_mask=lowercase_ , use_cache=lowercase_ )
        lowercase__ , lowercase__ : List[Any] = outputs.to_tuple()
        # create hypothetical multiple next token and extent to next_input_ids
        lowercase__ : List[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
        lowercase__ : Any = ids_tensor((self.batch_size, 3) , 2 )
        # append to next input_ids and
        lowercase__ : List[str] = torch.cat([input_ids, next_tokens] , dim=-1 )
        lowercase__ : Tuple = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
        lowercase__ : Tuple = model(lowercase_ , attention_mask=lowercase_ )["last_hidden_state"]
        lowercase__ : List[Any] = model(lowercase_ , attention_mask=lowercase_ , past_key_values=lowercase_ )[
            "last_hidden_state"
        ]
        # select random slice
        lowercase__ : Union[str, Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        lowercase__ : int = output_from_no_past[:, -3:, random_slice_idx].detach()
        lowercase__ : Union[str, Any] = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=1E-2 ) )
    def __UpperCamelCase ( self : Optional[int] , lowercase_ : Optional[int] , lowercase_ : List[Any] ) -> Dict:
        # Round-trip the encoder and decoder through save/from_pretrained and check
        # the standalone modules reproduce the full model's hidden states.
        lowercase__ : Optional[Any] = MaMaaaModel(config=lowercase_ ).to(lowercase_ ).eval()
        lowercase__ : str = model(**lowercase_ )
        lowercase__ : Union[str, Any] = outputs.encoder_last_hidden_state
        lowercase__ : Dict = outputs.last_hidden_state
        with tempfile.TemporaryDirectory() as tmpdirname:
            lowercase__ : List[str] = model.get_encoder()
            encoder.save_pretrained(lowercase_ )
            lowercase__ : int = MaMaaaEncoder.from_pretrained(lowercase_ ).to(lowercase_ )
            lowercase__ : str = encoder(inputs_dict["input_ids"] , attention_mask=inputs_dict["attention_mask"] )[
                0
            ]
            self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
        with tempfile.TemporaryDirectory() as tmpdirname:
            lowercase__ : Optional[int] = model.get_decoder()
            decoder.save_pretrained(lowercase_ )
            lowercase__ : int = MaMaaaDecoder.from_pretrained(lowercase_ ).to(lowercase_ )
            lowercase__ : List[str] = decoder(
                input_ids=inputs_dict["decoder_input_ids"] , attention_mask=inputs_dict["decoder_attention_mask"] , encoder_hidden_states=lowercase_ , encoder_attention_mask=inputs_dict["attention_mask"] , )[0]
            self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class snake_case_ ( __A ,__A ,__A ,unittest.TestCase ):
    # Common model/generation/pipeline test-suite wiring for tiny M2M100 models.
    # NOTE(review): obfuscation damage — the class attributes below were
    # originally distinct names (all_model_classes, all_generative_model_classes,
    # pipeline_model_mapping, is_encoder_decoder, ...) but all collapsed to `__A`.
    __A : Optional[Any] = (
        (
            MaMaaaModel,
            MaMaaaForConditionalGeneration,
        )
        if is_torch_available()
        else ()
    )
    __A : Optional[Any] = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
    __A : Dict = (
        {
            "conversational": MaMaaaForConditionalGeneration,
            "feature-extraction": MaMaaaModel,
            "summarization": MaMaaaForConditionalGeneration,
            "text2text-generation": MaMaaaForConditionalGeneration,
            "translation": MaMaaaForConditionalGeneration,
        }
        if is_torch_available()
        else {}
    )
    __A : str = True
    __A : List[str] = True
    __A : List[Any] = False
    __A : List[Any] = False
    def __UpperCamelCase ( self : Dict , lowercase_ : List[Any] , lowercase_ : List[str] , lowercase_ : List[str] , lowercase_ : Tuple , lowercase_ : int ) -> str:
        # Skip pipeline cases that cannot be driven with a simple tokenizer.
        if pipeline_test_casse_name == "TranslationPipelineTests":
            # Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
            # `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
            return True
        return False
    def __UpperCamelCase ( self : List[Any] ) -> Dict:
        # setUp: build the model tester and the shared config tester.
        lowercase__ : List[Any] = MaMaaaModelTester(self )
        lowercase__ : str = ConfigTester(self , config_class=lowercase_ )
    def __UpperCamelCase ( self : List[str] ) -> Optional[Any]:
        self.config_tester.run_common_tests()
    def __UpperCamelCase ( self : List[str] ) -> Optional[int]:
        # save_pretrained / from_pretrained round-trip must lose no weights.
        lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            lowercase__ : int = model_class(lowercase_ )
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(lowercase_ )
                lowercase__ , lowercase__ : int = model_class.from_pretrained(lowercase_ , output_loading_info=lowercase_ )
            self.assertEqual(info["missing_keys"] , [] )
    def __UpperCamelCase ( self : Dict ) -> str:
        lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*lowercase_ )
    def __UpperCamelCase ( self : Union[str, Any] ) -> Optional[Any]:
        lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*lowercase_ )
    def __UpperCamelCase ( self : Optional[Any] ) -> Optional[Any]:
        # Forward pass driven purely by inputs_embeds (no input_ids).
        lowercase__ , lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
            lowercase__ : Union[str, Any] = model_class(lowercase_ )
            model.to(lowercase_ )
            model.eval()
            lowercase__ : List[Any] = copy.deepcopy(self._prepare_for_class(lowercase_ , lowercase_ ) )
            if not self.is_encoder_decoder:
                lowercase__ : Optional[int] = inputs["input_ids"]
                del inputs["input_ids"]
            else:
                lowercase__ : str = inputs["input_ids"]
                lowercase__ : Optional[Any] = inputs.get("decoder_input_ids" , lowercase_ )
                del inputs["input_ids"]
                inputs.pop("decoder_input_ids" , lowercase_ )
            lowercase__ : Optional[int] = model.get_input_embeddings()
            if not self.is_encoder_decoder:
                lowercase__ : int = wte(lowercase_ )
            else:
                lowercase__ : str = wte(lowercase_ )
                lowercase__ : str = wte(lowercase_ )
            with torch.no_grad():
                model(**lowercase_ )[0]
    def __UpperCamelCase ( self : Tuple ) -> str:
        # Smoke-test generation in fp16 when a CUDA device is available.
        lowercase__ , lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs()
        lowercase__ : Any = input_dict["input_ids"]
        lowercase__ : List[Any] = input_ids.ne(1 ).to(lowercase_ )
        lowercase__ : Optional[int] = MaMaaaForConditionalGeneration(lowercase_ ).eval().to(lowercase_ )
        if torch_device == "cuda":
            model.half()
        model.generate(lowercase_ , attention_mask=lowercase_ )
        model.generate(num_beams=4 , do_sample=lowercase_ , early_stopping=lowercase_ , num_return_sequences=3 )
def lowercase_ ( _lowerCamelCase : Any):
return torch.tensor(_lowerCamelCase , dtype=torch.long , device=_lowerCamelCase)
UpperCamelCase = 1E-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class snake_case_ ( unittest.TestCase ):
    # Slow integration tests against the real facebook/m2m100_418M checkpoint.
    @cached_property
    def __UpperCamelCase ( self : Tuple ) -> Tuple:
        # Lazily-loaded tokenizer shared by the tests below.
        return MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M" )
    def __UpperCamelCase ( self : str ) -> Optional[int]:
        # Base model forward pass: check output shape and a fixed 3x3 logit slice.
        lowercase__ : Optional[Any] = MaMaaaModel.from_pretrained("facebook/m2m100_418M" ).to(lowercase_ )
        lowercase__ : int = _long_tensor([[12_80_28, 98, 12, 3_05_27, 27_32, 1_59, 77_55, 6_19_04, 3_91_44, 38, 2]] )
        lowercase__ : Optional[Any] = _long_tensor([[2, 12_80_28, 98, 12, 3_05_27, 27_32, 1_59, 77_55, 6_19_04, 3_91_44, 38]] )
        lowercase__ : int = prepare_mam_aaa_inputs_dict(model.config , lowercase_ , lowercase_ )
        with torch.no_grad():
            lowercase__ : Any = model(**lowercase_ )[0]
        lowercase__ : Any = torch.Size((1, 11, 10_24) )
        self.assertEqual(output.shape , lowercase_ )
        # change to expected output here
        lowercase__ : Tuple = torch.tensor(
            [[-0.77_80, -0.16_76, 0.10_38], [-6.75_56, -1.39_92, 0.05_67], [-7.53_83, -0.59_20, -0.27_79]] , device=lowercase_ )
        self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase_ , atol=lowercase_ ) )
    def __UpperCamelCase ( self : Optional[int] ) -> List[str]:
        # Conditional-generation head: logits over the full vocab plus a fixed slice.
        lowercase__ : Optional[Any] = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M" ).to(lowercase_ )
        # change to intended input
        lowercase__ : Optional[Any] = _long_tensor([[12_80_28, 98, 12, 3_05_27, 27_32, 1_59, 77_55, 6_19_04, 3_91_44, 38, 2]] )
        lowercase__ : Any = _long_tensor([[2, 12_80_28, 98, 12, 3_05_27, 27_32, 1_59, 77_55, 6_19_04, 3_91_44, 38]] )
        lowercase__ : Optional[int] = prepare_mam_aaa_inputs_dict(model.config , lowercase_ , lowercase_ )
        with torch.no_grad():
            lowercase__ : List[str] = model(**lowercase_ )[0]
        lowercase__ : Optional[Any] = torch.Size((1, 11, model.config.vocab_size) )
        self.assertEqual(output.shape , lowercase_ )
        # change to expected output here
        lowercase__ : List[Any] = torch.tensor(
            [[-1.04_48, -1.04_11, 3.79_92], [-3.21_91, -3.23_86, -1.34_51], [-3.62_10, -3.59_93, 0.49_25]] , device=lowercase_ )
        self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase_ , atol=lowercase_ ) )
    def __UpperCamelCase ( self : Optional[int] ) -> Tuple:
        # End-to-end fr->en beam-search translation against reference sentences.
        lowercase__ : List[Any] = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M" ).to(lowercase_ )
        lowercase__ : int = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M" , src_lang="fr" , tgt_lang="en" )
        lowercase__ : Union[str, Any] = [
            "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
            "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
            "Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent"
            " Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de"
            " l'ampleur de la surveillance américaine sur l'ensemble des communications en France.",
        ]
        # The below article tests that we don't add any hypotheses outside of the top n_beams
        lowercase__ : Optional[int] = tokenizer(lowercase_ , padding=lowercase_ , return_tensors="pt" )
        lowercase__ : str = model.generate(
            input_ids=dct["input_ids"].to(lowercase_ ) , attention_mask=dct["attention_mask"].to(lowercase_ ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id("en" ) , )
        lowercase__ : Optional[int] = [
            "The NSA case highlights the total absence of intelligence debate",
            "I think there are two levels of response from the French government.",
            "When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S."
            " Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all"
            " communications in France.",
        ]
        lowercase__ : List[str] = tokenizer.batch_decode(
            hypotheses_batch.tolist() , clean_up_tokenization_spaces=lowercase_ , skip_special_tokens=lowercase_ )
        assert generated == expected_en
| 87 | import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    """Convert a TensorFlow BERT checkpoint to a PyTorch state_dict file.

    FIX: the obfuscated original was named ``lowercase_`` with three duplicate
    ``_lowerCamelCase`` parameters, although the ``__main__`` block below calls
    ``convert_tf_checkpoint_to_pytorch`` and the f-strings read ``config`` and
    ``pytorch_dump_path`` — restore the real names.

    Args:
        tf_checkpoint_path: path to the TensorFlow checkpoint.
        bert_config_file: JSON config describing the pre-trained architecture.
        pytorch_dump_path: where to write the converted PyTorch weights.
    """
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f'''Building PyTorch model from configuration: {config}''')
    model = BertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f'''Save PyTorch model to {pytorch_dump_path}''')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    # CLI entry point: parse the three required paths and run the conversion.
    # NOTE(review): obfuscation damage — the parser and the parsed args are both
    # bound to `UpperCamelCase`, and the converter is invoked under its real
    # name `convert_tf_checkpoint_to_pytorch` although it was defined above as
    # `lowercase_`.
    UpperCamelCase = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
    )
    parser.add_argument(
        '''--bert_config_file''',
        default=None,
        type=str,
        required=True,
        help=(
            '''The config json file corresponding to the pre-trained BERT model. \n'''
            '''This specifies the model architecture.'''
        ),
    )
    parser.add_argument(
        '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    UpperCamelCase = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 87 | 1 |
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
    from PIL import Image
else:
    # Stub standing in for PIL.Image when vision extras are not installed, so the
    # module still imports; any call is a no-op.
    class snake_case_ :
        @staticmethod
        def __UpperCamelCase ( *lowercase_ : Optional[int] , **lowercase_ : Optional[int] ) -> List[Any]:
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class snake_case_ ( unittest.TestCase ):
    # Pipeline tests for the "object-detection" task, from tiny DETR checkpoints
    # up to slow full-model integration runs.
    __A : str = MODEL_FOR_OBJECT_DETECTION_MAPPING
    def __UpperCamelCase ( self : Optional[Any] , lowercase_ : int , lowercase_ : List[str] , lowercase_ : Dict ) -> Any:
        # Build the pipeline under test plus its example inputs.
        lowercase__ : Union[str, Any] = ObjectDetectionPipeline(model=lowercase_ , image_processor=lowercase_ )
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
    def __UpperCamelCase ( self : Any , lowercase_ : Optional[int] , lowercase_ : List[str] ) -> Union[str, Any]:
        # Shared runner: every detection must carry score/label/box with int coords.
        lowercase__ : Tuple = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png" , threshold=0.0 )
        self.assertGreater(len(lowercase_ ) , 0 )
        for detected_object in outputs:
            self.assertEqual(
                lowercase_ , {
                    "score": ANY(lowercase_ ),
                    "label": ANY(lowercase_ ),
                    "box": {"xmin": ANY(lowercase_ ), "ymin": ANY(lowercase_ ), "xmax": ANY(lowercase_ ), "ymax": ANY(lowercase_ )},
                } , )
        import datasets
        lowercase__ : List[Any] = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" )
        # Batch over heterogeneous inputs: PIL image, URL, and RGBA/LA/L files.
        lowercase__ : Union[str, Any] = [
            Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            # RGBA
            dataset[0]["file"],
            # LA
            dataset[1]["file"],
            # L
            dataset[2]["file"],
        ]
        lowercase__ : Optional[Any] = object_detector(lowercase_ , threshold=0.0 )
        self.assertEqual(len(lowercase_ ) , len(lowercase_ ) )
        for outputs in batch_outputs:
            self.assertGreater(len(lowercase_ ) , 0 )
            for detected_object in outputs:
                self.assertEqual(
                    lowercase_ , {
                        "score": ANY(lowercase_ ),
                        "label": ANY(lowercase_ ),
                        "box": {"xmin": ANY(lowercase_ ), "ymin": ANY(lowercase_ ), "xmax": ANY(lowercase_ ), "ymax": ANY(lowercase_ )},
                    } , )
    @require_tf
    @unittest.skip("Object detection not implemented in TF" )
    def __UpperCamelCase ( self : Dict ) -> int:
        pass
    @require_torch
    def __UpperCamelCase ( self : Optional[Any] ) -> Union[str, Any]:
        # Tiny random DETR checkpoint: pin exact scores/boxes for single and batch.
        lowercase__ : Optional[int] = "hf-internal-testing/tiny-detr-mobilenetsv3"
        lowercase__ : Optional[Any] = AutoModelForObjectDetection.from_pretrained(lowercase_ )
        lowercase__ : Tuple = AutoFeatureExtractor.from_pretrained(lowercase_ )
        lowercase__ : Any = ObjectDetectionPipeline(model=lowercase_ , feature_extractor=lowercase_ )
        lowercase__ : Any = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=0.0 )
        self.assertEqual(
            nested_simplify(lowercase_ , decimals=4 ) , [
                {"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 1_59, "ymin": 1_20, "xmax": 4_80, "ymax": 3_59}},
                {"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 1_59, "ymin": 1_20, "xmax": 4_80, "ymax": 3_59}},
            ] , )
        lowercase__ : Tuple = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ] , threshold=0.0 , )
        self.assertEqual(
            nested_simplify(lowercase_ , decimals=4 ) , [
                [
                    {"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 1_59, "ymin": 1_20, "xmax": 4_80, "ymax": 3_59}},
                    {"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 1_59, "ymin": 1_20, "xmax": 4_80, "ymax": 3_59}},
                ],
                [
                    {"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 1_59, "ymin": 1_20, "xmax": 4_80, "ymax": 3_59}},
                    {"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 1_59, "ymin": 1_20, "xmax": 4_80, "ymax": 3_59}},
                ],
            ] , )
    @require_torch
    @slow
    def __UpperCamelCase ( self : List[str] ) -> int:
        # Full DETR-ResNet-50 via explicit model/extractor construction.
        lowercase__ : List[str] = "facebook/detr-resnet-50"
        lowercase__ : str = AutoModelForObjectDetection.from_pretrained(lowercase_ )
        lowercase__ : int = AutoFeatureExtractor.from_pretrained(lowercase_ )
        lowercase__ : Any = ObjectDetectionPipeline(model=lowercase_ , feature_extractor=lowercase_ )
        lowercase__ : Optional[int] = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" )
        self.assertEqual(
            nested_simplify(lowercase_ , decimals=4 ) , [
                {"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 1_75, "ymax": 1_17}},
                {"score": 0.99_60, "label": "remote", "box": {"xmin": 3_33, "ymin": 72, "xmax": 3_68, "ymax": 1_87}},
                {"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 6_39, "ymax": 4_73}},
                {"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 3_14, "ymax": 4_70}},
                {"score": 0.99_87, "label": "cat", "box": {"xmin": 3_45, "ymin": 23, "xmax": 6_40, "ymax": 3_68}},
            ] , )
        lowercase__ : List[str] = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ] )
        self.assertEqual(
            nested_simplify(lowercase_ , decimals=4 ) , [
                [
                    {"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 1_75, "ymax": 1_17}},
                    {"score": 0.99_60, "label": "remote", "box": {"xmin": 3_33, "ymin": 72, "xmax": 3_68, "ymax": 1_87}},
                    {"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 6_39, "ymax": 4_73}},
                    {"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 3_14, "ymax": 4_70}},
                    {"score": 0.99_87, "label": "cat", "box": {"xmin": 3_45, "ymin": 23, "xmax": 6_40, "ymax": 3_68}},
                ],
                [
                    {"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 1_75, "ymax": 1_17}},
                    {"score": 0.99_60, "label": "remote", "box": {"xmin": 3_33, "ymin": 72, "xmax": 3_68, "ymax": 1_87}},
                    {"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 6_39, "ymax": 4_73}},
                    {"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 3_14, "ymax": 4_70}},
                    {"score": 0.99_87, "label": "cat", "box": {"xmin": 3_45, "ymin": 23, "xmax": 6_40, "ymax": 3_68}},
                ],
            ] , )
    @require_torch
    @slow
    def __UpperCamelCase ( self : Union[str, Any] ) -> List[Any]:
        # Same checkpoint via the high-level pipeline() factory.
        lowercase__ : Optional[Any] = "facebook/detr-resnet-50"
        lowercase__ : Tuple = pipeline("object-detection" , model=lowercase_ )
        lowercase__ : List[Any] = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" )
        self.assertEqual(
            nested_simplify(lowercase_ , decimals=4 ) , [
                {"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 1_75, "ymax": 1_17}},
                {"score": 0.99_60, "label": "remote", "box": {"xmin": 3_33, "ymin": 72, "xmax": 3_68, "ymax": 1_87}},
                {"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 6_39, "ymax": 4_73}},
                {"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 3_14, "ymax": 4_70}},
                {"score": 0.99_87, "label": "cat", "box": {"xmin": 3_45, "ymin": 23, "xmax": 6_40, "ymax": 3_68}},
            ] , )
        lowercase__ : Tuple = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ] )
        self.assertEqual(
            nested_simplify(lowercase_ , decimals=4 ) , [
                [
                    {"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 1_75, "ymax": 1_17}},
                    {"score": 0.99_60, "label": "remote", "box": {"xmin": 3_33, "ymin": 72, "xmax": 3_68, "ymax": 1_87}},
                    {"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 6_39, "ymax": 4_73}},
                    {"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 3_14, "ymax": 4_70}},
                    {"score": 0.99_87, "label": "cat", "box": {"xmin": 3_45, "ymin": 23, "xmax": 6_40, "ymax": 3_68}},
                ],
                [
                    {"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 1_75, "ymax": 1_17}},
                    {"score": 0.99_60, "label": "remote", "box": {"xmin": 3_33, "ymin": 72, "xmax": 3_68, "ymax": 1_87}},
                    {"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 6_39, "ymax": 4_73}},
                    {"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 3_14, "ymax": 4_70}},
                    {"score": 0.99_87, "label": "cat", "box": {"xmin": 3_45, "ymin": 23, "xmax": 6_40, "ymax": 3_68}},
                ],
            ] , )
    @require_torch
    @slow
    def __UpperCamelCase ( self : str ) -> List[Any]:
        # A high threshold must filter detections down to the two confident cats.
        lowercase__ : int = 0.99_85
        lowercase__ : Any = "facebook/detr-resnet-50"
        lowercase__ : List[str] = pipeline("object-detection" , model=lowercase_ )
        lowercase__ : int = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=lowercase_ )
        self.assertEqual(
            nested_simplify(lowercase_ , decimals=4 ) , [
                {"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 3_14, "ymax": 4_70}},
                {"score": 0.99_87, "label": "cat", "box": {"xmin": 3_45, "ymin": 23, "xmax": 6_40, "ymax": 3_68}},
            ] , )
    @require_torch
    @require_pytesseract
    @slow
    def __UpperCamelCase ( self : List[str] ) -> List[str]:
        # LayoutLMv3 document model driven through the object-detection pipeline.
        lowercase__ : Any = "Narsil/layoutlmv3-finetuned-funsd"
        lowercase__ : List[str] = 0.99_93
        lowercase__ : Optional[Any] = pipeline("object-detection" , model=lowercase_ , threshold=lowercase_ )
        lowercase__ : Dict = object_detector(
            "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png" )
        self.assertEqual(
            nested_simplify(lowercase_ , decimals=4 ) , [
                {"score": 0.99_93, "label": "I-ANSWER", "box": {"xmin": 2_94, "ymin": 2_54, "xmax": 3_43, "ymax": 2_64}},
                {"score": 0.99_93, "label": "I-ANSWER", "box": {"xmin": 2_94, "ymin": 2_54, "xmax": 3_43, "ymax": 2_64}},
            ] , )
| 87 | import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def lowercase_(key, default=False):
    """Parse a boolean flag from environment variable *key*.

    Returns *default* unchanged when the variable is unset; otherwise
    returns 1/0 (strtobool-compatible) for truthy/falsy spellings.
    Raises ValueError for any other value.

    Fixes: the obfuscated signature declared two parameters both named
    ``_lowerCamelCase`` (a SyntaxError) while the body read the undefined
    names ``key``/``default``; results were also bound to dead
    ``lowercase__`` locals while ``_value`` was returned.
    """
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        # Inline replacement for distutils.util.strtobool — distutils was
        # removed in Python 3.12 (PEP 632); same accepted spellings and the
        # same 1/0 return values.
        normalized = value.lower()
        if normalized in ("y", "yes", "t", "true", "on", "1"):
            _value = 1
        elif normalized in ("n", "no", "f", "false", "off", "0"):
            _value = 0
        else:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value
# Test-selection flags parsed from the environment and pytest skip markers.
# NOTE(review): every assignment below rebinds the same name `UpperCamelCase`,
# so only the last value survives, and `parse_flag_from_env` is not actually
# defined under that name in this file — both are artifacts of automated
# renaming.  Upstream these are distinct module constants
# (_run_slow_tests, _run_remote_tests, _run_local_tests, _run_packaged_tests,
# require_lz4, require_py7zr, require_zstandard, require_sndfile,
# require_beam, require_dill_gt_0_3_2, require_not_windows); restore them
# before use.
UpperCamelCase = parse_flag_from_env('''RUN_SLOW''', default=False)
UpperCamelCase = parse_flag_from_env('''RUN_REMOTE''', default=False)
UpperCamelCase = parse_flag_from_env('''RUN_LOCAL''', default=True)
UpperCamelCase = parse_flag_from_env('''RUN_PACKAGED''', default=True)
# Compression
UpperCamelCase = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='''test requires lz4''')
UpperCamelCase = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='''test requires py7zr''')
UpperCamelCase = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='''test requires zstandard''')
# Audio
UpperCamelCase = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec('''soundfile''') is None or version.parse(importlib_metadata.version('''soundfile''')) < version.parse('''0.12.0'''),
    reason='''test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ''',
)
# Beam
UpperCamelCase = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('''0.3.2'''),
    reason='''test requires apache-beam and a compatible dill version''',
)
# Dill-cloudpickle compatibility
UpperCamelCase = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse('''0.3.2'''),
    reason='''test requires dill>0.3.2 for cloudpickle compatibility''',
)
# Windows
UpperCamelCase = pytest.mark.skipif(
    sys.platform == '''win32''',
    reason='''test should not be run on Windows''',
)
def lowercase_(test_case):
    """Skip *test_case* unless ``faiss`` can be imported.

    Fixes: the obfuscated body bound the skipped test to a dead
    ``lowercase__`` local and then returned the undefined name ``test_case``.
    """
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss")(test_case)
    return test_case


def lowercase_(test_case):
    """Skip *test_case* unless ``regex`` can be imported (same fix as above)."""
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex")(test_case)
    return test_case


def lowercase_(test_case):
    """Skip *test_case* unless ``elasticsearch`` can be imported (same fix)."""
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch")(test_case)
    return test_case


def lowercase_(test_case):
    """Skip *test_case* unless ``sqlalchemy`` can be imported (same fix)."""
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy")(test_case)
    return test_case
def lowercase_(test_case):
    """Skip *test_case* unless PyTorch is available (per ``datasets.config``).

    Fixes: the obfuscated body assigned the skipped case to a dead
    ``lowercase__`` local and returned the undefined name ``test_case``.
    """
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch")(test_case)
    return test_case


def lowercase_(test_case):
    """Skip *test_case* unless TensorFlow is available (same fix as above)."""
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow")(test_case)
    return test_case


def lowercase_(test_case):
    """Skip *test_case* unless JAX is available (same fix)."""
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX")(test_case)
    return test_case


def lowercase_(test_case):
    """Skip *test_case* unless Pillow is available (same fix)."""
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow")(test_case)
    return test_case
def lowercase_(test_case):
    """Skip *test_case* unless ``transformers`` can be imported.

    Fixes: the obfuscated parameter was ``_lowerCamelCase`` while the
    success branch returned the undefined name ``test_case``.
    """
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    else:
        return test_case


def lowercase_(test_case):
    """Skip *test_case* unless ``tiktoken`` can be imported (same fix as above)."""
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(test_case)
    else:
        return test_case


def lowercase_(test_case):
    """Skip *test_case* unless ``spacy`` can be imported (same fix)."""
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(test_case)
    else:
        return test_case
def lowercase_(model):
    """Decorator factory: skip the test unless spacy AND the spacy *model* load.

    Fixes: in the obfuscated version the inner function's parameter shadowed
    the outer one, so ``spacy.load`` was handed the test case instead of the
    model name, and the skip message formatted the test case as well.
    """

    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            # spacy is importable but the requested model is not installed
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model
def lowercase_(test_case):
    """Skip *test_case* unless ``pyspark`` can be imported.

    Fixes: success branch returned the undefined name ``test_case`` while
    the parameter was obfuscated to ``_lowerCamelCase``.
    """
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(test_case)
    else:
        return test_case


def lowercase_(test_case):
    """Skip *test_case* unless ``joblibspark`` can be imported (same fix)."""
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(test_case)
    else:
        return test_case
if not _run_slow_tests or _run_slow_tests == 0:
lowercase__ : Tuple = unittest.skip("test is slow")(_lowerCamelCase)
return test_case
def lowercase_ ( _lowerCamelCase : int):
if not _run_local_tests or _run_local_tests == 0:
lowercase__ : str = unittest.skip("test is local")(_lowerCamelCase)
return test_case
def lowercase_ ( _lowerCamelCase : Optional[int]):
if not _run_packaged_tests or _run_packaged_tests == 0:
lowercase__ : List[Any] = unittest.skip("test is packaged")(_lowerCamelCase)
return test_case
def lowercase_ ( _lowerCamelCase : Tuple):
if not _run_remote_tests or _run_remote_tests == 0:
lowercase__ : Union[str, Any] = unittest.skip("test requires remote")(_lowerCamelCase)
return test_case
def lowercase_ ( *_lowerCamelCase : str):
def decorate(cls : str):
for name, fn in cls.__dict__.items():
if callable(_lowerCamelCase) and name.startswith("test"):
for decorator in decorators:
lowercase__ : Optional[int] = decorator(_lowerCamelCase)
setattr(cls , _lowerCamelCase , _lowerCamelCase)
return cls
return decorate
class snake_case_ ( __A ):
    # Raised by the offline simulator below when a request would hang because
    # no timeout was set.
    # NOTE(review): the base class `__A` looks like an obfuscation
    # placeholder (upstream this error derives from
    # `requests.ConnectionError`) — verify against the original source.
    pass


class snake_case_ ( __A ):
    # Enum-like set of offline-simulation strategies consumed (via `is`
    # comparisons) by the offline context manager below.
    # NOTE(review): the attribute names `__A` will be name-mangled inside the
    # class body — an artifact of automated renaming.  Upstream names are
    # CONNECTION_FAILS = 0, CONNECTION_TIMES_OUT = 1,
    # HF_DATASETS_OFFLINE_SET_TO_1 = 2.
    __A : List[Any] = 0
    __A : str = 1
    __A : int = 2
@contextmanager
def lowercase_(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1E-16):
    """Simulate an offline environment for the duration of the block.

    Three strategies, selected by *mode*:
      - CONNECTION_FAILS: every ``requests`` send raises ConnectionError;
      - CONNECTION_TIMES_OUT: requests are redirected to a non-routable IP so
        they time out after *timeout* seconds;
      - HF_DATASETS_OFFLINE_SET_TO_1: flip the datasets offline config flag.

    Fixes: the obfuscated signature declared two parameters both named
    ``_lowerCamelCase`` (a SyntaxError) while the body read ``mode`` and
    ``timeout``; the inner helpers had the same duplicate-parameter problem
    and bound their results to dead ``lowercase__`` locals.  Reconstructed
    from the upstream ``datasets`` testing utilities.
    NOTE(review): `OfflineSimulationMode` / `RequestWouldHangIndefinitelyError`
    are the two classes defined above under obfuscated names — restore their
    upstream names.
    """
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout.")
        kwargs["timeout"] = timeout
        try:
            return online_request(session, method, url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"OfflineMock[{url}]"),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")
@contextmanager
def lowercase_(*args, **kwargs):
    """Create a temporary directory, chdir into it for the block, and always restore the previous cwd.

    Extra positional/keyword arguments are forwarded to
    ``tempfile.TemporaryDirectory``.

    Fixes: the obfuscated signature named both ``*args`` and ``**kwargs``
    ``_lowerCamelCase`` (a SyntaxError), and both ``os.chdir`` calls were
    handed that same undefined name instead of the temp dir / original cwd.
    """
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            # restore even if the body raised, before the temp dir is removed
            os.chdir(original_working_dir)
@contextmanager
def lowercase_():
    """Assert that pyarrow's allocated memory strictly increases inside the block.

    Fixes: the baseline measurement was bound to a dead ``lowercase__``
    local while the closing assert read the undefined name
    ``previous_allocated_memory``.
    """
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def lowercase_():
    """Assert that pyarrow's allocated memory does not increase inside the block.

    Fixes: same undefined ``previous_allocated_memory`` defect as the
    increase-checking variant above.
    """
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def lowercase_(rng_a, rng_b):
    """Return True iff two NumPy bit generators are in the same state.

    Compared by drawing the same sample from deep copies so neither input
    generator is advanced.

    Fixes: the obfuscated signature declared both parameters as
    ``_lowerCamelCase`` (a SyntaxError) which also made the comparison
    degenerate to comparing a generator with itself.
    """
    return deepcopy(rng_a).integers(0, 100, 10).tolist() == deepcopy(rng_b).integers(0, 100, 10).tolist()
def lowercase_(func):
    """Decorator: xfail the test when the wrapped call dies with an HTTP 500/502.

    Useful for tests hitting flaky remote endpoints: server-side errors
    become expected failures, anything else is re-raised.

    Fixes: the obfuscated body called the undefined name ``func`` and
    inspected ``str(_lowerCamelCase)`` instead of the caught error ``err``.
    """
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            # xfail only on server-side 500/502 responses
            if str(err).startswith("500") or str(err).startswith("502"):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)
class snake_case_ :
    """Result of a finished subprocess: return code plus captured stdout/stderr lines.

    Fixes: the obfuscated ``__init__`` declared three parameters all named
    ``lowercase_`` (a SyntaxError) and bound the values to dead local
    annotations instead of ``self`` attributes — ``result.returncode`` etc.
    are read by the subprocess runner below.
    """

    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode  # process exit status
        self.stdout = stdout          # list of decoded stdout lines
        self.stderr = stderr          # list of decoded stderr lines
async def lowercase_(stream, callback):
    """Read *stream* line by line, invoking *callback* on each raw line until EOF.

    Fixes: the obfuscated signature declared both parameters as
    ``_lowerCamelCase`` (a SyntaxError) while the body read the undefined
    names ``stream`` and ``callback``.
    """
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            # readline() returns b"" at EOF
            break
async def lowercase_(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False):
    """Run *cmd* as a subprocess, streaming its stdout/stderr live.

    Returns a _RunOutput with the exit code and the captured (decoded)
    stdout/stderr lines.

    Fixes: the obfuscated signature declared all six parameters as
    ``_lowerCamelCase`` (a SyntaxError), the ``tee`` helper had the same
    duplicate-parameter defect, and its ``print`` was handed the same
    undefined name three times.  Reconstructed from the upstream
    ``datasets`` testing utilities.
    NOTE(review): `_read_stream` / `_RunOutput` are the helpers defined just
    above under obfuscated names — restore their upstream names.
    """
    if echo:
        print("\nRunning: ", " ".join(cmd))
    p = await asyncio.create_subprocess_exec(
        cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env,
    )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        # decode, collect, and optionally mirror each line as it arrives
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    # wrap in Tasks: passing bare coroutines to asyncio.wait is an error on Python 3.11+
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda line: tee(line, out, sys.stdout, label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr, lambda line: tee(line, err, sys.stderr, label="stderr:"))),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def lowercase_(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True):
    """Synchronously run *cmd* via the async streamer and fail loudly on error.

    Raises RuntimeError when the process exits non-zero (with its combined
    stderr) or when it produced no output at all.

    Fixes: the obfuscated signature declared all parameters as
    ``_lowerCamelCase`` (a SyntaxError) while the body read the undefined
    names ``cmd``/``env``/``stdin``/``timeout``/``quiet``/``echo``, and
    ``cmd_str``/``stderr`` were bound to dead ``lowercase__`` locals.
    NOTE(review): `_stream_subprocess` is the async helper defined above
    under an obfuscated name — restore its upstream name.
    NOTE(review): `asyncio.get_event_loop()` outside a running loop is
    deprecated; consider `asyncio.run(...)` if the call sites allow it.
    """
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo))
    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}")
    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")
    return result
def lowercase_():
    """Return this pytest-xdist worker's numeric id (0 for ``gw0`` or when not under xdist).

    Fixes: the obfuscated body stored the worker id in a dead ``lowercase__``
    local, then applied ``re.sub`` to — and returned ``int()`` of — the
    undefined name ``_lowerCamelCase``.
    """
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    # strip the "gw" prefix, leaving just the numeric worker index
    worker = re.sub(r"^gw", "", worker, 0, re.M)
    return int(worker)
def lowercase_():
    """Return a torch.distributed port unique to this pytest-xdist worker.

    Fixes: the obfuscated body bound both values to dead ``lowercase__``
    locals and returned ``port + uniq_delta`` with both names undefined.
    NOTE(review): `pytest_xdist_worker_id` is the helper defined just above
    under an obfuscated name — restore its upstream name.
    """
    port = 2_9500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
| 87 | 1 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
TaConfig,
TaTokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def lowercase_():
    """Download and return the LAVIS demo image (RGB) used for the conversion sanity check.

    Fixes: the obfuscated call passed ``stream=_lowerCamelCase`` — an
    undefined name in this zero-argument function; upstream streams the
    response (``stream=True``) so ``.raw`` can be handed to PIL.
    """
    url = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image
def lowercase_(config):
    """Build the (original_key, hf_key) rename list for the InstructBLIP vision encoder and Q-Former.

    *config* must expose ``config.vision_config.num_hidden_layers``.

    Fixes: the obfuscated body appended to ``rename_keys`` and indexed
    ``config`` — both undefined (the list was bound to a dead ``lowercase__``
    local and the parameter was renamed to ``_lowerCamelCase``).
    """
    rename_keys = []
    # fmt: off
    # vision encoder
    rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding"))
    rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding"))
    rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight"))
    rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias"))
    rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight"))
    rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias"))
    for i in range(config.vision_config.num_hidden_layers):
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.weight", f"vision_model.encoder.layers.{i}.layer_norm1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.bias", f"vision_model.encoder.layers.{i}.layer_norm1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.weight", f"vision_model.encoder.layers.{i}.layer_norm2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.bias", f"vision_model.encoder.layers.{i}.layer_norm2.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.qkv.weight", f"vision_model.encoder.layers.{i}.self_attn.qkv.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.weight", f"vision_model.encoder.layers.{i}.self_attn.projection.weight",))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.bias", f"vision_model.encoder.layers.{i}.self_attn.projection.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.weight", f"vision_model.encoder.layers.{i}.mlp.fc1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.bias", f"vision_model.encoder.layers.{i}.mlp.fc1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.weight", f"vision_model.encoder.layers.{i}.mlp.fc2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.bias", f"vision_model.encoder.layers.{i}.mlp.fc2.bias"))
    # QFormer
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.embeddings.layernorm.weight"))
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.embeddings.layernorm.bias"))
    # fmt: on
    return rename_keys
def lowercase_(dct, old, new):
    """Rename key *old* to *new* in *dct*, in place.

    Fixes: the obfuscated signature declared all three parameters as
    ``_lowerCamelCase`` (a SyntaxError) and the popped value was bound to a
    dead ``lowercase__`` local instead of being re-inserted under *new*.
    """
    val = dct.pop(old)
    dct[new] = val
def lowercase_(state_dict, config):
    """Merge the original q/v attention biases into a single HF-style qkv bias, per layer.

    The original checkpoint stores separate ``q_bias`` and ``v_bias`` (the
    key projection has no bias); the HF model expects one concatenated
    ``qkv.bias`` = [q_bias, zeros, v_bias].  Mutates *state_dict* in place.

    Fixes: duplicate ``_lowerCamelCase`` parameters, ``torch.zeros_like``
    applied to an undefined name, and the resulting bias bound to a dead
    ``lowercase__`` local instead of being written back to *state_dict*.
    """
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")
        # next, set bias in the state dict (k projection gets a zero bias)
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias
def lowercase_(model_name):
    """Build the InstructBlipConfig (and image size) for a given checkpoint name.

    Returns ``(config, image_size)``.  Raises ValueError for unknown names.

    Fixes: the obfuscated parameter was ``_lowerCamelCase`` while the body
    branched on the undefined name ``model_name``, every sub-config was
    bound to a dead ``lowercase__`` local, and the final ``return config,
    image_size`` referenced names that were never created.  Reconstructed
    from the upstream conversion script.
    """
    image_size = 364 if "coco" in model_name else 224
    vision_config = InstructBlipVisionConfig(image_size=image_size).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "vicuna-7b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-7b-hf", vocab_size=3_2001).to_dict()
    elif "vicuna-13b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-13b-hf", vocab_size=3_2001).to_dict()
    else:
        raise ValueError("Model name not supported")
    # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
    qformer_config = InstructBlipQFormerConfig(vocab_size=3_0523).to_dict()
    config = InstructBlipConfig(vision_config=vision_config, text_config=text_config, qformer_config=qformer_config)
    return config, image_size
@torch.no_grad()
def lowercase_ ( _lowerCamelCase : Dict , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : Tuple=False):
    # Convert an original LAVIS InstructBLIP checkpoint to the HF
    # InstructBlipForConditionalGeneration format: build tokenizers and
    # config, load the original model, rename/merge state-dict keys, verify
    # logits and generation parity on a demo image, then optionally save
    # and/or push the converted model.
    #
    # NOTE(review): automated renaming collapsed the three parameters
    # (upstream: model_name, pytorch_dump_folder_path, push_to_hub) into
    # duplicate `_lowerCamelCase` names (a SyntaxError) and every local
    # (qformer_tokenizer, tokenizer, config, hf_model, state_dict, ...) into
    # `lowercase__`, so most references below are undefined and the
    # annotated tuple-unpack lines are syntactically invalid — restore from
    # the upstream conversion script before running.
    lowercase__ : Optional[int] = AutoTokenizer.from_pretrained("bert-base-uncased" , truncation_side="left")
    qformer_tokenizer.add_special_tokens({"bos_token": "[DEC]"})
    if "t5" in model_name:
        lowercase__ : Any = TaTokenizerFast.from_pretrained("google/flan-t5-xl" , truncation_side="left")
    elif "vicuna" in model_name:
        # the following was used in the original implementation:
        # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
        # tokenizer.add_special_tokens({"pad_token": "[PAD]"})
        # tokenizer.add_special_tokens({"bos_token": "</s>"})
        # tokenizer.add_special_tokens({"eos_token": "</s>"})
        # tokenizer.add_special_tokens({"unk_token": "</s>"})
        lowercase__ : List[Any] = LlamaTokenizerFast.from_pretrained(
            "huggyllama/llama-7b" , truncation_side="left" , bos_token="</s>" , unk_token="</s>")
        tokenizer.add_special_tokens({"pad_token": "[PAD]"})
    lowercase__ , lowercase__ : Union[str, Any] = get_blipa_config(_lowerCamelCase)
    lowercase__ : Any = InstructBlipForConditionalGeneration(_lowerCamelCase).eval()
    # map HF checkpoint names to LAVIS (name, model_type) pairs
    lowercase__ : List[Any] = {
        "instructblip-vicuna-7b": ("blip2_vicuna_instruct", "vicuna7b"),
        "instructblip-vicuna-13b": ("blip2_vicuna_instruct", "vicuna13b"),
        "instructblip-flan-t5-xl": ("blip2_t5_instruct", "flant5xl"),
        "instructblip-flan-t5-xxl": ("blip2_t5_instruct", "flant5xxl"),
    }
    lowercase__ , lowercase__ : Union[str, Any] = model_name_to_original[model_name]
    # load original model
    print("Loading original model...")
    lowercase__ : Any = "cuda:1" if torch.cuda.is_available() else "cpu"
    lowercase__ : Any = "cuda:2" if torch.cuda.is_available() else "cpu"
    lowercase__ , lowercase__ , lowercase__ : Any = load_model_and_preprocess(
        name=_lowerCamelCase , model_type=_lowerCamelCase , is_eval=_lowerCamelCase , device=_lowerCamelCase)
    original_model.eval()
    print("Done!")
    # update state dict keys
    lowercase__ : Dict = original_model.state_dict()
    lowercase__ : Tuple = create_rename_keys(_lowerCamelCase)
    for src, dest in rename_keys:
        rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        lowercase__ : List[Any] = state_dict.pop(_lowerCamelCase)
        if key.startswith("Qformer.bert"):
            lowercase__ : List[str] = key.replace("Qformer.bert" , "qformer")
        if "attention.self" in key:
            lowercase__ : Dict = key.replace("self" , "attention")
        if "llm_proj" in key:
            lowercase__ : Union[str, Any] = key.replace("llm_proj" , "language_projection")
        if "t5_proj" in key:
            lowercase__ : Optional[Any] = key.replace("t5_proj" , "language_projection")
        if key.startswith("llm_model"):
            lowercase__ : Optional[Any] = key.replace("llm_model" , "language_model")
        if key.startswith("t5"):
            lowercase__ : List[str] = key.replace("t5" , "language")
        lowercase__ : Dict = val
    # read in qv biases
    read_in_q_v_bias(_lowerCamelCase , _lowerCamelCase)
    # note: weights get loaded in torch.float32 by default
    hf_model.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase)
    lowercase__ : Any = load_demo_image()
    lowercase__ : Optional[Any] = "What is unusual about this image?"
    # create processor
    lowercase__ : str = BlipImageProcessor(
        size={"height": image_size, "width": image_size} , image_mean=_lowerCamelCase , image_std=_lowerCamelCase)
    lowercase__ : int = InstructBlipProcessor(
        image_processor=_lowerCamelCase , tokenizer=_lowerCamelCase , qformer_tokenizer=_lowerCamelCase , )
    lowercase__ : Tuple = processor(images=_lowerCamelCase , text=_lowerCamelCase , return_tensors="pt").to(_lowerCamelCase)
    # make sure processor creates exact same pixel values
    lowercase__ : Optional[int] = vis_processors["eval"](_lowerCamelCase).unsqueeze(0).to(_lowerCamelCase)
    lowercase__ : List[str] = inputs.pixel_values
    assert torch.allclose(original_pixel_values.to(pixel_values.device) , _lowerCamelCase)
    original_model.to(_lowerCamelCase)
    hf_model.to(_lowerCamelCase)
    with torch.no_grad():
        if "vicuna" in model_name:
            lowercase__ : List[Any] = original_model({"image": original_pixel_values, "text_input": [prompt]}).logits
            lowercase__ : Union[str, Any] = hf_model(**_lowerCamelCase).logits
        else:
            lowercase__ : str = original_model(
                {"image": original_pixel_values, "text_input": [prompt], "text_output": ["\n"]}).logits
            lowercase__ : List[str] = tokenizer("\n" , return_tensors="pt").input_ids.to(_lowerCamelCase)
            lowercase__ : List[Any] = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id , -100)
            lowercase__ : Dict = hf_model(**_lowerCamelCase , labels=_lowerCamelCase).logits
    print("First values of original logits:" , original_logits[0, :3, :3])
    print("First values of HF logits:" , logits[0, :3, :3])
    # assert values
    assert original_logits.shape == logits.shape
    # vicuna logits are fp16-derived, so use a looser tolerance there
    lowercase__ : Union[str, Any] = 1E-4 if "vicuna" in model_name else 1E-5
    assert torch.allclose(original_logits.to(logits.device) , _lowerCamelCase , atol=_lowerCamelCase)
    print("Looks ok!")
    print("Generating with original model...")
    lowercase__ : Union[str, Any] = original_model.generate({"image": original_pixel_values, "prompt": prompt} , num_beams=5)
    # important: we need to cast the weights of the HF model to the appropriate type
    print("Generating with HF model...")
    lowercase__ : List[str] = hf_model.generate(
        **_lowerCamelCase , do_sample=_lowerCamelCase , num_beams=5 , max_length=256 , min_length=1 , top_p=0.9 , repetition_penalty=1.5 , length_penalty=1.0 , temperature=1 , )
    if "vicuna" in model_name:
        # convert output id 0 to 2 (eos_token_id)
        # TODO add this in the generate method?
        lowercase__ : Optional[Any] = 2
    print("Original generation:" , _lowerCamelCase)
    lowercase__ : int = processor.batch_decode(_lowerCamelCase , skip_special_tokens=_lowerCamelCase)
    lowercase__ : Union[str, Any] = [text.strip() for text in output_text]
    print("HF generation:" , _lowerCamelCase)
    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(_lowerCamelCase)
        hf_model.save_pretrained(_lowerCamelCase)
    if push_to_hub:
        processor.push_to_hub(f'''Salesforce/{model_name}''')
        hf_model.push_to_hub(f'''Salesforce/{model_name}''')
if __name__ == "__main__":
    # CLI entry point for the InstructBLIP conversion script.
    # NOTE(review): the `UpperCamelCase =` assignments below all rebind the
    # same name (upstream: parser, choices, args), and `choices` / `args` /
    # `convert_blipa_checkpoint` are therefore undefined here — artifacts of
    # automated renaming; restore the distinct names before running.
    UpperCamelCase = argparse.ArgumentParser()
    # checkpoint names this script knows how to convert
    UpperCamelCase = [
        '''instructblip-vicuna-7b''',
        '''instructblip-vicuna-13b''',
        '''instructblip-flan-t5-xl''',
        '''instructblip-flan-t5-xxl''',
    ]
    parser.add_argument(
        '''--model_name''',
        default='''instructblip-flan-t5-xl''',
        choices=choices,
        type=str,
        help='''Path to hf config.json of model to convert''',
    )
    parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    parser.add_argument(
        '''--push_to_hub''',
        action='''store_true''',
        help='''Whether to push the model and processor to the hub after converting''',
    )
    UpperCamelCase = parser.parse_args()
    convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 87 | import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def lowercase_(idx):
    """Rename map for the patch-embedding layer of CvT stage *idx*.

    Returns (hf_key, original_key) pairs.

    Fixes: the obfuscated body appended to ``embed`` — bound to a dead
    ``lowercase__`` local — and formatted ``{idx}`` with the parameter
    renamed to ``_lowerCamelCase``, leaving both names undefined.
    """
    embed = []
    hf_prefix = f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings"
    orig_prefix = f"stage{idx}.patch_embed"
    embed.append((f"{hf_prefix}.projection.weight", f"{orig_prefix}.proj.weight"))
    embed.append((f"{hf_prefix}.projection.bias", f"{orig_prefix}.proj.bias"))
    embed.append((f"{hf_prefix}.normalization.weight", f"{orig_prefix}.norm.weight"))
    embed.append((f"{hf_prefix}.normalization.bias", f"{orig_prefix}.norm.bias"))
    return embed
def lowercase_(idx, cnt):
    """Rename map for attention block *cnt* of CvT stage *idx*.

    Returns (hf_key, original_key) pairs covering the q/k/v depthwise
    convolution projections (conv weight + batch-norm statistics), the
    linear q/k/v projections, the attention output projection, the MLP, and
    the two layer norms — in the same order the original flat list used.

    Fixes: the obfuscated body appended to ``attention_weights`` and
    formatted ``{idx}``/``{cnt}`` with all of those names undefined (the
    list was bound to a dead ``lowercase__`` local and both parameters were
    renamed to ``_lowerCamelCase``).  Rewritten as loops over the repeating
    key patterns instead of ~90 hand-written appends.
    """
    attention_weights = []
    hf_prefix = f"cvt.encoder.stages.{idx}.layers.{cnt}"
    orig_prefix = f"stage{idx}.blocks.{cnt}"
    # depthwise conv projections for q/k/v: conv weight + 5 batch-norm tensors each
    for name, short in (("query", "q"), ("key", "k"), ("value", "v")):
        conv_proj = f"{hf_prefix}.attention.attention.convolution_projection_{name}.convolution_projection"
        attention_weights.append((f"{conv_proj}.convolution.weight", f"{orig_prefix}.attn.conv_proj_{short}.conv.weight"))
        for stat in ("weight", "bias", "running_mean", "running_var", "num_batches_tracked"):
            attention_weights.append((f"{conv_proj}.normalization.{stat}", f"{orig_prefix}.attn.conv_proj_{short}.bn.{stat}"))
    # linear q/k/v projections
    for name, short in (("query", "q"), ("key", "k"), ("value", "v")):
        for stat in ("weight", "bias"):
            attention_weights.append(
                (f"{hf_prefix}.attention.attention.projection_{name}.{stat}", f"{orig_prefix}.attn.proj_{short}.{stat}"))
    # attention output projection, MLP, and the two layer norms
    for hf_suffix, orig_suffix in (
        ("attention.output.dense", "attn.proj"),
        ("intermediate.dense", "mlp.fc1"),
        ("output.dense", "mlp.fc2"),
        ("layernorm_before", "norm1"),
        ("layernorm_after", "norm2"),
    ):
        for stat in ("weight", "bias"):
            attention_weights.append((f"{hf_prefix}.{hf_suffix}.{stat}", f"{orig_prefix}.{orig_suffix}.{stat}"))
    return attention_weights
def lowercase_ ( _lowerCamelCase : Optional[int]):
    """Return the (new_key, old_key) rename pair for a stage's CLS token.

    Args:
        _lowerCamelCase: index of the CvT stage. The original checkpoint
            stores the cls token under ``stage2`` (only the last stage owns
            one), hence the hard-coded source key.

    Returns:
        A one-element list containing the rename tuple.
    """
    # Bug fix: the body previously referenced the undefined names `idx` and
    # `token`; bind the list and use the function parameter instead.
    token = []
    token.append((f'''cvt.encoder.stages.{_lowerCamelCase}.cls_token''', "stage2.cls_token"))
    return token
def lowercase_ ( ):
    """Return rename pairs for the final layernorm and classification head.

    Returns:
        List of (new_key, old_key) tuples mapping HF parameter names to the
        original checkpoint's names.
    """
    # Bug fix: the list was previously bound to a throwaway name while the
    # appends targeted the undefined name `head`.
    head = []
    head.append(("layernorm.weight", "norm.weight"))
    head.append(("layernorm.bias", "norm.bias"))
    head.append(("classifier.weight", "head.weight"))
    head.append(("classifier.bias", "head.bias"))
    return head
def lowercase_ ( cvt_model : Union[str, Any] , image_size : Optional[int] , cvt_file_name : Optional[int] , pytorch_dump_folder_path : Union[str, Any]):
    """Convert an original Microsoft CvT checkpoint to the HuggingFace format.

    Args:
        cvt_model: model name, e.g. ``"cvt-w24"``; the depth variant is parsed
            from characters 4:6 of the basename.
        image_size: input image size recorded on the image processor.
        cvt_file_name: path to the original ``.pth`` checkpoint.
        pytorch_dump_folder_path: output directory for the converted model.

    NOTE(review): the original signature repeated the same parameter name four
    times (a SyntaxError); the CLI entry point calls this positionally, so
    renaming the parameters is the required fix.
    """
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1000
    repo_id = "huggingface/label-files"
    idalabel = json.load(open(cached_download(hf_hub_url(repo_id , img_labels_file , repo_type="dataset")) , "r"))
    # Bug fix: keys must come from the loaded mapping itself (`k`), not from
    # an unrelated variable.
    idalabel = {int(k): v for k, v in idalabel.items()}
    labelaid = {v: k for k, v in idalabel.items()}
    # `id2label`/`label2id` are the kwargs PretrainedConfig actually consumes.
    config = CvtConfig(num_labels=num_labels , id2label=idalabel , label2id=labelaid)
    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/" , 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/" , 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]
    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    # assumes the processor exposes a `size` dict with "shortest_edge" — TODO confirm
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name , map_location=torch.device("cpu"))
    huggingface_weights = OrderedDict()
    list_of_state_dict = []
    # Collect all (new_key, old_key) rename pairs, stage by stage.
    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx , cnt)
    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    # Re-key every tensor from the original checkpoint into the HF layout.
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]
    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    # CLI entry point for the CvT checkpoint conversion above.
    import argparse  # local import keeps this edit self-contained

    # Bug fix: the parser/args objects were previously bound to a throwaway
    # name while the following lines read the undefined names `parser`/`args`.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '''--cvt_model''',
        default='''cvt-w24''',
        type=str,
        help='''Name of the cvt model you\'d like to convert.''',
    )
    parser.add_argument(
        '''--image_size''',
        default=384,
        type=int,
        help='''Input Image Size''',
    )
    parser.add_argument(
        '''--cvt_file_name''',
        default=R'''cvtmodels\CvT-w24-384x384-IN-22k.pth''',
        type=str,
        help='''Path to the original checkpoint file''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
    )
    args = parser.parse_args()
    # `lowercase_` defined above is the checkpoint-conversion entry point
    # (the previous call target `convert_cvt_checkpoint` is undefined here).
    lowercase_(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 87 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
# Lazy-import structure for the TrOCR subpackage: submodule -> public names.
# Bug fix: this dict (and the optional-torch extension below) were previously
# bound to an unrelated name, leaving `_import_structure` — consumed by
# `_LazyModule` at the bottom — undefined.
_import_structure = {
    '''configuration_trocr''': ['''TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TrOCRConfig'''],
    '''processing_trocr''': ['''TrOCRProcessor'''],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch is optional: skip registering the modeling module.
    pass
else:
    _import_structure['''modeling_trocr'''] = [
        '''TROCR_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TrOCRForCausalLM''',
        '''TrOCRPreTrainedModel''',
    ]
if TYPE_CHECKING:
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
    import sys

    # Install the lazy module so attribute access triggers the real import
    # (previously the result was bound to a plain variable, a no-op).
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 87 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure for the ELECTRA subpackage: submodule -> public names.
# Bug fix: every optional branch previously rebound an unrelated variable;
# they must define/extend `_import_structure`, which `_LazyModule` consumes.
_import_structure = {
    '''configuration_electra''': ['''ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ElectraConfig''', '''ElectraOnnxConfig'''],
    '''tokenization_electra''': ['''ElectraTokenizer'''],
}
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''tokenization_electra_fast'''] = ['''ElectraTokenizerFast''']
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_electra'''] = [
        '''ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''ElectraForCausalLM''',
        '''ElectraForMaskedLM''',
        '''ElectraForMultipleChoice''',
        '''ElectraForPreTraining''',
        '''ElectraForQuestionAnswering''',
        '''ElectraForSequenceClassification''',
        '''ElectraForTokenClassification''',
        '''ElectraModel''',
        '''ElectraPreTrainedModel''',
        '''load_tf_weights_in_electra''',
    ]
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_tf_electra'''] = [
        '''TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFElectraForMaskedLM''',
        '''TFElectraForMultipleChoice''',
        '''TFElectraForPreTraining''',
        '''TFElectraForQuestionAnswering''',
        '''TFElectraForSequenceClassification''',
        '''TFElectraForTokenClassification''',
        '''TFElectraModel''',
        '''TFElectraPreTrainedModel''',
    ]
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_flax_electra'''] = [
        '''FlaxElectraForCausalLM''',
        '''FlaxElectraForMaskedLM''',
        '''FlaxElectraForMultipleChoice''',
        '''FlaxElectraForPreTraining''',
        '''FlaxElectraForQuestionAnswering''',
        '''FlaxElectraForSequenceClassification''',
        '''FlaxElectraForTokenClassification''',
        '''FlaxElectraModel''',
        '''FlaxElectraPreTrainedModel''',
    ]
if TYPE_CHECKING:
    from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
    from .tokenization_electra import ElectraTokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_electra_fast import ElectraTokenizerFast
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_electra import (
            ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            ElectraForCausalLM,
            ElectraForMaskedLM,
            ElectraForMultipleChoice,
            ElectraForPreTraining,
            ElectraForQuestionAnswering,
            ElectraForSequenceClassification,
            ElectraForTokenClassification,
            ElectraModel,
            ElectraPreTrainedModel,
            load_tf_weights_in_electra,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_electra import (
            TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFElectraForMaskedLM,
            TFElectraForMultipleChoice,
            TFElectraForPreTraining,
            TFElectraForQuestionAnswering,
            TFElectraForSequenceClassification,
            TFElectraForTokenClassification,
            TFElectraModel,
            TFElectraPreTrainedModel,
        )
    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_electra import (
            FlaxElectraForCausalLM,
            FlaxElectraForMaskedLM,
            FlaxElectraForMultipleChoice,
            FlaxElectraForPreTraining,
            FlaxElectraForQuestionAnswering,
            FlaxElectraForSequenceClassification,
            FlaxElectraForTokenClassification,
            FlaxElectraModel,
            FlaxElectraPreTrainedModel,
        )
else:
    import sys

    # Install the lazy module (previously bound to a plain variable, a no-op).
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 87 | 1 |
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
# Type aliases referenced by the distance functions' annotations below.
# Bug fix: both aliases were bound to the same throwaway name, and
# `np.floataa` is not a NumPy attribute (should be `np.float64`).
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007
def lowercase_ ( vector_a : Vector , vector_b : Vector):
    """Return the Euclidean distance between two vectors using NumPy.

    Bug fix: the original signature reused one parameter name for both
    vectors (a SyntaxError, and the difference would always be zero).
    """
    return np.sqrt(np.sum((np.asarray(vector_a) - np.asarray(vector_b)) ** 2))
def lowercase_ ( vector_a : Vector , vector_b : Vector):
    """Return the Euclidean distance between two vectors without NumPy.

    Bug fix: the original reused one parameter name for both vectors and
    unpacked both zip elements into the same loop variable, so every squared
    difference evaluated to zero.
    """
    return sum((va - vb) ** 2 for va, vb in zip(vector_a , vector_b)) ** (1 / 2)
if __name__ == "__main__":

    def benchmark():
        """Time the NumPy and pure-Python distance implementations."""
        # Bug fix: the function was previously defined under a different name
        # than the one called below, raising NameError at runtime.
        from timeit import timeit

        # NOTE(review): the timeit statements expect module-level functions
        # named `euclidean_distance_no_np` / `euclidean_distance` — confirm
        # these names match the actual definitions above.
        print("Without Numpy")
        print(
            timeit(
                "euclidean_distance_no_np([1, 2, 3], [4, 5, 6])" , number=1_0000 , globals=globals() , ))
        print("With Numpy")
        print(
            timeit(
                "euclidean_distance([1, 2, 3], [4, 5, 6])" , number=1_0000 , globals=globals() , ))

    benchmark()
| 87 | import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case_ ( __A ,unittest.TestCase ):
    """Tokenization test suite for LED (slow and fast tokenizers).

    NOTE(review): throughout this class results are assigned to the throwaway
    name ``lowercase__`` while later statements read other names
    (``self.vocab_file``, ``batch``, ``targets`` ...), every method shares the
    name ``__UpperCamelCase`` (later defs shadow earlier ones), and the three
    class attributes below all rebind ``__A``. This looks like mechanical
    renaming damage — cross-check against the upstream LED test file.
    """
    # presumably: tokenizer_class, rust_tokenizer_class, test_rust_tokenizer — TODO confirm
    __A : Union[str, Any] = LEDTokenizer
    __A : Union[str, Any] = LEDTokenizerFast
    __A : Optional[Any] = True
    # setUp: writes a tiny BPE vocab/merges fixture into the temp directory.
    def __UpperCamelCase ( self : List[str] ) -> Union[str, Any]:
        super().setUp()
        lowercase__ : List[str] = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        lowercase__ : Optional[int] = dict(zip(lowercase_ , range(len(lowercase_ ) ) ) )
        lowercase__ : str = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        lowercase__ : Tuple = {"unk_token": "<unk>"}
        lowercase__ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        lowercase__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        # NOTE(review): self.vocab_file / self.merges_file read below were
        # never assigned above — the two os.path.join results look intended.
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(lowercase_ ) + "\n" )
        with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(lowercase_ ) )
    # Factory: slow tokenizer built from the on-disk fixture.
    def __UpperCamelCase ( self : int , **lowercase_ : str ) -> List[Any]:
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase_ )
    # Factory: fast (Rust) tokenizer built from the on-disk fixture.
    def __UpperCamelCase ( self : List[Any] , **lowercase_ : Any ) -> List[Any]:
        kwargs.update(self.special_tokens_map )
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowercase_ )
    # Sample (input, output) text pair for generic mixin tests.
    def __UpperCamelCase ( self : str , lowercase_ : Any ) -> Tuple:
        return "lower newer", "lower newer"
    # Pretrained slow tokenizer used by the integration tests below.
    @cached_property
    def __UpperCamelCase ( self : Tuple ) -> Optional[Any]:
        return LEDTokenizer.from_pretrained("allenai/led-base-16384" )
    # Pretrained fast tokenizer used by the integration tests below.
    @cached_property
    def __UpperCamelCase ( self : Tuple ) -> int:
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384" )
    # Checks batched encoding shape and the exact token ids of the first row.
    @require_torch
    def __UpperCamelCase ( self : int ) -> List[Any]:
        lowercase__ : Union[str, Any] = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        lowercase__ : str = [0, 2_50, 2_51, 1_78_18, 13, 3_91_86, 19_38, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            lowercase__ : Dict = tokenizer(lowercase_ , max_length=len(lowercase_ ) , padding=lowercase_ , return_tensors="pt" )
            self.assertIsInstance(lowercase_ , lowercase_ )
            self.assertEqual((2, 9) , batch.input_ids.shape )
            self.assertEqual((2, 9) , batch.attention_mask.shape )
            lowercase__ : Union[str, Any] = batch.input_ids.tolist()[0]
            self.assertListEqual(lowercase_ , lowercase_ )
    # Plain encoding must not produce labels / decoder masks.
    @require_torch
    def __UpperCamelCase ( self : List[str] ) -> Tuple:
        lowercase__ : Dict = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            lowercase__ : Optional[int] = tokenizer(lowercase_ , padding=lowercase_ , return_tensors="pt" )
            self.assertIn("input_ids" , lowercase_ )
            self.assertIn("attention_mask" , lowercase_ )
            self.assertNotIn("labels" , lowercase_ )
            self.assertNotIn("decoder_attention_mask" , lowercase_ )
    # Target texts padded to max_length=32.
    @require_torch
    def __UpperCamelCase ( self : Optional[Any] ) -> Any:
        lowercase__ : Dict = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            lowercase__ : Dict = tokenizer(text_target=lowercase_ , max_length=32 , padding="max_length" , return_tensors="pt" )
            self.assertEqual(32 , targets["input_ids"].shape[1] )
    # Truncation of an over-long input to the model max length (5122).
    @require_torch
    def __UpperCamelCase ( self : Optional[int] ) -> Tuple:
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            lowercase__ : int = tokenizer(
                ["I am a small frog" * 10_24, "I am a small frog"] , padding=lowercase_ , truncation=lowercase_ , return_tensors="pt" )
            self.assertIsInstance(lowercase_ , lowercase_ )
            self.assertEqual(batch.input_ids.shape , (2, 51_22) )
    # BOS/EOS must wrap both inputs and targets.
    @require_torch
    def __UpperCamelCase ( self : List[str] ) -> Any:
        lowercase__ : Union[str, Any] = ["A long paragraph for summarization."]
        lowercase__ : List[Any] = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            lowercase__ : List[Any] = tokenizer(lowercase_ , return_tensors="pt" )
            lowercase__ : Dict = tokenizer(text_target=lowercase_ , return_tensors="pt" )
            lowercase__ : Optional[int] = inputs["input_ids"]
            lowercase__ : str = targets["input_ids"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
    # Padding must also pad the LED-specific global_attention_mask.
    @require_torch
    def __UpperCamelCase ( self : Union[str, Any] ) -> Dict:
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            lowercase__ : int = ["Summary of the text.", "Another summary."]
            lowercase__ : List[str] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
            lowercase__ : Tuple = tokenizer(lowercase_ , padding=lowercase_ )
            lowercase__ : int = [[0] * len(lowercase_ ) for x in encoded_output["input_ids"]]
            lowercase__ : Any = tokenizer.pad(lowercase_ )
            self.assertSequenceEqual(outputs["global_attention_mask"] , lowercase_ )
    # Intentionally skipped mixin hook.
    def __UpperCamelCase ( self : int ) -> Union[str, Any]:
        pass
    # Slow and fast tokenizers must agree on special-token handling.
    def __UpperCamelCase ( self : int ) -> Optional[Any]:
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                lowercase__ : List[Any] = self.rust_tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
                lowercase__ : List[str] = self.tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
                lowercase__ : List[Any] = "A, <mask> AllenNLP sentence."
                lowercase__ : Tuple = tokenizer_r.encode_plus(lowercase_ , add_special_tokens=lowercase_ , return_token_type_ids=lowercase_ )
                lowercase__ : List[str] = tokenizer_p.encode_plus(lowercase_ , add_special_tokens=lowercase_ , return_token_type_ids=lowercase_ )
                self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
                self.assertEqual(
                    sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
                lowercase__ : Any = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
                lowercase__ : Optional[Any] = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
                self.assertSequenceEqual(tokens_p["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
                self.assertSequenceEqual(tokens_r["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
                self.assertSequenceEqual(
                    lowercase_ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
                self.assertSequenceEqual(
                    lowercase_ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
| 87 | 1 |
from __future__ import annotations
def lowercase_ ( _lowerCamelCase : list[int]):
    """Sort a list of integers in place using pigeonhole sort and return it.

    Handles negatives and duplicates; empty input is returned unchanged.
    Runtime is O(n + r) where r = max - min + 1.

    Bug fix: the original body read the undefined names `_min`, `_max`,
    `holes`, `holes_repeat`, `index` and `array` (their assignments had been
    collapsed into a single throwaway variable), so it crashed on any input.
    """
    if len(_lowerCamelCase) == 0:
        return _lowerCamelCase
    _min, _max = min(_lowerCamelCase), max(_lowerCamelCase)
    # One hole per distinct value in [min, max].
    holes_range = _max - _min + 1
    holes, holes_repeat = [0] * holes_range, [0] * holes_range
    # Fill the holes: remember each value and how often it occurs.
    for i in _lowerCamelCase:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1
    # Write the values back into the input list in sorted order.
    index = 0
    for i in range(holes_range):
        while holes_repeat[i] > 0:
            _lowerCamelCase[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1
    # Returns the sorted array.
    return _lowerCamelCase
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Bug fix: the input string and parsed list were previously bound to a
    # throwaway name while the following lines read `user_input`/`unsorted`,
    # and the sort function defined above is named `lowercase_`, not
    # `pigeon_sort`.
    user_input = input('''Enter numbers separated by comma:\n''')
    unsorted = [int(x) for x in user_input.split(''',''')]
    print(lowercase_(unsorted))
| 87 | import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
# Bug fix: both module constants were bound to the same throwaway name, while
# the pipeline class below reads `logger` and `TARGET_FEATURE_LENGTH`.
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
# Fixed mel-feature segment length processed per decoding chunk.
TARGET_FEATURE_LENGTH = 256
class snake_case_ ( __A ):
    """Spectrogram-diffusion style pipeline: encodes note tokens and previous
    mel context, iteratively denoises a mel segment with a FiLM-conditioned
    T5 decoder, and optionally vocodes the result with MelGAN.

    NOTE(review): many statements below assign to the throwaway name
    ``lowercase__`` while later lines read other names (``zero_one``,
    ``max_out``, ``timesteps``, ``mel`` ...), and ``__init__``/other
    signatures repeat the parameter name ``lowercase_`` (a SyntaxError).
    This looks like mechanical renaming damage — cross-check against the
    upstream pipeline source before relying on this block.
    """
    # presumably the optional-components list (melgan may be absent) — TODO confirm
    __A : str = ["melgan"]
    def __init__( self : str , lowercase_ : SpectrogramNotesEncoder , lowercase_ : SpectrogramContEncoder , lowercase_ : TaFilmDecoder , lowercase_ : DDPMScheduler , lowercase_ : OnnxRuntimeModel if is_onnx_available() else Any , ) -> None:
        super().__init__()
        # From MELGAN
        lowercase__ : List[Any] = math.log(1E-5 ) # Matches MelGAN training.
        lowercase__ : str = 4.0 # Largest value for most examples
        lowercase__ : Any = 1_28
        self.register_modules(
            notes_encoder=lowercase_ , continuous_encoder=lowercase_ , decoder=lowercase_ , scheduler=lowercase_ , melgan=lowercase_ , )
    # Linearly rescales features from [min_value, max_value] to output_range.
    def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : List[str]=(-1.0, 1.0) , lowercase_ : Dict=False ) -> Optional[Any]:
        lowercase__ , lowercase__ : int = output_range
        if clip:
            lowercase__ : Optional[Any] = torch.clip(lowercase_ , self.min_value , self.max_value )
        # Scale to [0, 1].
        lowercase__ : List[str] = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out
    # Inverse of the scaling above: maps model outputs back to feature range.
    def __UpperCamelCase ( self : Optional[int] , lowercase_ : List[str] , lowercase_ : List[str]=(-1.0, 1.0) , lowercase_ : List[Any]=False ) -> Union[str, Any]:
        lowercase__ , lowercase__ : Tuple = input_range
        lowercase__ : Optional[Any] = torch.clip(lowercase_ , lowercase_ , lowercase_ ) if clip else outputs
        # Scale to [0, 1].
        lowercase__ : Union[str, Any] = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value
    # Runs both encoders; a token is masked in when its id is > 0.
    def __UpperCamelCase ( self : List[str] , lowercase_ : Any , lowercase_ : Optional[Any] , lowercase_ : Tuple ) -> List[str]:
        lowercase__ : Optional[Any] = input_tokens > 0
        lowercase__ , lowercase__ : int = self.notes_encoder(
            encoder_input_tokens=lowercase_ , encoder_inputs_mask=lowercase_ )
        lowercase__ , lowercase__ : List[Any] = self.continuous_encoder(
            encoder_inputs=lowercase_ , encoder_inputs_mask=lowercase_ )
        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
    # Single denoising step: normalizes the timestep tensor and runs the decoder.
    def __UpperCamelCase ( self : Optional[Any] , lowercase_ : Optional[int] , lowercase_ : Optional[int] , lowercase_ : str ) -> Tuple:
        lowercase__ : Union[str, Any] = noise_time
        if not torch.is_tensor(lowercase_ ):
            lowercase__ : Optional[Any] = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
        elif torch.is_tensor(lowercase_ ) and len(timesteps.shape ) == 0:
            lowercase__ : Optional[Any] = timesteps[None].to(input_tokens.device )
        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        lowercase__ : int = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )
        lowercase__ : str = self.decoder(
            encodings_and_masks=lowercase_ , decoder_input_tokens=lowercase_ , decoder_noise_time=lowercase_ )
        return logits
    # Full generation loop: chunk-by-chunk denoising with previous-segment context.
    @torch.no_grad()
    def __call__( self : List[str] , lowercase_ : List[List[int]] , lowercase_ : Optional[torch.Generator] = None , lowercase_ : int = 1_00 , lowercase_ : bool = True , lowercase_ : str = "numpy" , lowercase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowercase_ : int = 1 , ) -> Union[AudioPipelineOutput, Tuple]:
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(lowercase_ , lowercase_ ) or callback_steps <= 0)
        ):
            raise ValueError(
                F'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
                F''' {type(lowercase_ )}.''' )
        lowercase__ : str = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa )
        lowercase__ : Optional[int] = np.zeros([1, 0, self.n_dims] , np.floataa )
        lowercase__ : str = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=lowercase_ , device=self.device )
        for i, encoder_input_tokens in enumerate(lowercase_ ):
            if i == 0:
                lowercase__ : Union[str, Any] = torch.from_numpy(pred_mel[:1].copy() ).to(
                    device=self.device , dtype=self.decoder.dtype )
                # The first chunk has no previous context.
                lowercase__ : List[str] = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=lowercase_ , device=self.device )
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                lowercase__ : str = ones
            lowercase__ : str = self.scale_features(
                lowercase_ , output_range=[-1.0, 1.0] , clip=lowercase_ )
            lowercase__ : str = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=lowercase_ , continuous_mask=lowercase_ , )
            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            lowercase__ : List[str] = randn_tensor(
                shape=encoder_continuous_inputs.shape , generator=lowercase_ , device=self.device , dtype=self.decoder.dtype , )
            # set step values
            self.scheduler.set_timesteps(lowercase_ )
            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
                lowercase__ : Optional[int] = self.decode(
                    encodings_and_masks=lowercase_ , input_tokens=lowercase_ , noise_time=t / self.scheduler.config.num_train_timesteps , )
                # Compute previous output: x_t -> x_t-1
                lowercase__ : Optional[Any] = self.scheduler.step(lowercase_ , lowercase_ , lowercase_ , generator=lowercase_ ).prev_sample
            lowercase__ : Tuple = self.scale_to_features(lowercase_ , input_range=[-1.0, 1.0] )
            lowercase__ : List[str] = mel[:1]
            lowercase__ : Optional[int] = mel.cpu().float().numpy()
            lowercase__ : str = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(lowercase_ , lowercase_ )
            logger.info("Generated segment" , lowercase_ )
        if output_type == "numpy" and not is_onnx_available():
            raise ValueError(
                "Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'." )
        elif output_type == "numpy" and self.melgan is None:
            raise ValueError(
                "Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'." )
        if output_type == "numpy":
            lowercase__ : Union[str, Any] = self.melgan(input_features=full_pred_mel.astype(np.floataa ) )
        else:
            lowercase__ : Dict = full_pred_mel
        if not return_dict:
            return (output,)
        return AudioPipelineOutput(audios=lowercase_ )
| 87 | 1 |
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class snake_case_ :
    """Test helper that builds ViTMAE configurations and dummy inputs for the
    TensorFlow model tests.

    NOTE(review): several statements assign to the throwaway name
    ``lowercase__`` while later lines read ``config``/``pixel_values``/
    ``labels``, and the ``__init__`` signature repeats the parameter name
    ``lowercase_`` many times (a SyntaxError). This looks like mechanical
    renaming damage — cross-check against the upstream test file.
    """
    def __init__( self : Optional[int] , lowercase_ : Tuple , lowercase_ : Dict=13 , lowercase_ : Optional[int]=30 , lowercase_ : Tuple=2 , lowercase_ : List[str]=3 , lowercase_ : str=True , lowercase_ : Union[str, Any]=True , lowercase_ : Dict=32 , lowercase_ : Optional[int]=2 , lowercase_ : int=4 , lowercase_ : Optional[Any]=37 , lowercase_ : Union[str, Any]="gelu" , lowercase_ : Tuple=0.1 , lowercase_ : Optional[Any]=0.1 , lowercase_ : Any=10 , lowercase_ : Optional[int]=0.02 , lowercase_ : List[str]=3 , lowercase_ : Any=0.6 , lowercase_ : Tuple=None , ) -> List[str]:
        lowercase__ : Dict = parent
        lowercase__ : Union[str, Any] = batch_size
        lowercase__ : Optional[Any] = image_size
        lowercase__ : Optional[int] = patch_size
        lowercase__ : List[str] = num_channels
        lowercase__ : Union[str, Any] = is_training
        lowercase__ : Optional[int] = use_labels
        lowercase__ : List[str] = hidden_size
        lowercase__ : List[Any] = num_hidden_layers
        lowercase__ : Tuple = num_attention_heads
        lowercase__ : List[Any] = intermediate_size
        lowercase__ : Dict = hidden_act
        lowercase__ : Any = hidden_dropout_prob
        lowercase__ : int = attention_probs_dropout_prob
        lowercase__ : Optional[int] = type_sequence_label_size
        lowercase__ : Dict = initializer_range
        lowercase__ : Union[str, Any] = mask_ratio
        lowercase__ : Optional[int] = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        lowercase__ : Tuple = (image_size // patch_size) ** 2
        lowercase__ : Any = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
    # Builds (config, pixel_values, labels) for a single test invocation.
    def __UpperCamelCase ( self : List[str] ) -> Any:
        lowercase__ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        lowercase__ : int = None
        if self.use_labels:
            lowercase__ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        lowercase__ : int = self.get_config()
        return config, pixel_values, labels
    # Materializes a ViTMAEConfig mirroring the tester's hyperparameters.
    def __UpperCamelCase ( self : Optional[Any] ) -> List[Any]:
        return ViTMAEConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase_ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
    # Checks the base model's output shape.
    def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : str , lowercase_ : str , lowercase_ : int ) -> Any:
        lowercase__ : Optional[int] = TFViTMAEModel(config=lowercase_ )
        lowercase__ : str = model(lowercase_ , training=lowercase_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    # Checks the pretraining head's logits shape, incl. a greyscale variant.
    def __UpperCamelCase ( self : Dict , lowercase_ : Optional[int] , lowercase_ : Optional[int] , lowercase_ : Union[str, Any] ) -> List[Any]:
        lowercase__ : List[Any] = TFViTMAEForPreTraining(lowercase_ )
        lowercase__ : Dict = model(lowercase_ , training=lowercase_ )
        # expected sequence length = num_patches
        lowercase__ : List[str] = (self.image_size // self.patch_size) ** 2
        lowercase__ : Tuple = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
        # test greyscale images
        lowercase__ : Tuple = 1
        lowercase__ : Union[str, Any] = TFViTMAEForPreTraining(lowercase_ )
        lowercase__ : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        lowercase__ : str = model(lowercase_ , training=lowercase_ )
        lowercase__ : Dict = self.patch_size**2
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
    # Packages config and inputs into the dict form the common tests expect.
    def __UpperCamelCase ( self : Tuple ) -> List[str]:
        lowercase__ : Any = self.prepare_config_and_inputs()
        ((lowercase__) , (lowercase__) , (lowercase__)) : List[str] = config_and_inputs
        lowercase__ : str = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class snake_case_ ( __A ,__A ,unittest.TestCase ):
__A : str = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
__A : List[Any] = {"feature-extraction": TFViTMAEModel} if is_tf_available() else {}
__A : int = False
__A : Optional[int] = False
__A : Any = False
__A : Union[str, Any] = False
def __UpperCamelCase ( self : Tuple ) -> str:
lowercase__ : Optional[Any] = TFViTMAEModelTester(self )
lowercase__ : Any = ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ , hidden_size=37 )
def __UpperCamelCase ( self : Union[str, Any] ) -> str:
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMAE does not use inputs_embeds" )
def __UpperCamelCase ( self : Any ) -> Tuple:
pass
def __UpperCamelCase ( self : int ) -> List[Any]:
lowercase__ , lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Dict = model_class(lowercase_ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
lowercase__ : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase_ , tf.keras.layers.Layer ) )
def __UpperCamelCase ( self : Tuple ) -> List[Any]:
lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Tuple = model_class(lowercase_ )
lowercase__ : int = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : int = [*signature.parameters.keys()]
lowercase__ : List[str] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowercase_ )
def __UpperCamelCase ( self : Optional[Any] ) -> Tuple:
lowercase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def __UpperCamelCase ( self : List[str] ) -> List[str]:
lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowercase_ )
def __UpperCamelCase ( self : str ) -> Dict:
# make the mask reproducible
np.random.seed(2 )
lowercase__ , lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Any = int((config.image_size // config.patch_size) ** 2 )
lowercase__ : Optional[int] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowercase__ : int = model_class(lowercase_ )
lowercase__ : str = self._prepare_for_class(lowercase_ , lowercase_ )
lowercase__ : Dict = model(lowercase_ , noise=lowercase_ )
lowercase__ : Dict = copy.deepcopy(self._prepare_for_class(lowercase_ , lowercase_ ) )
lowercase__ : Union[str, Any] = model(**lowercase_ , noise=lowercase_ )
lowercase__ : str = outputs_dict[0].numpy()
lowercase__ : Tuple = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 )
def __UpperCamelCase ( self : Optional[int] ) -> Any:
# make the mask reproducible
np.random.seed(2 )
lowercase__ , lowercase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Any = int((config.image_size // config.patch_size) ** 2 )
lowercase__ : str = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(lowercase_ : Union[str, Any] ):
lowercase__ : Dict = {}
for k, v in inputs_dict.items():
if tf.is_tensor(lowercase_ ):
lowercase__ : Optional[int] = v.numpy()
else:
lowercase__ : Tuple = np.array(lowercase_ )
return inputs_np_dict
for model_class in self.all_model_classes:
lowercase__ : Dict = model_class(lowercase_ )
lowercase__ : List[Any] = self._prepare_for_class(lowercase_ , lowercase_ )
lowercase__ : Any = prepare_numpy_arrays(lowercase_ )
lowercase__ : List[Any] = model(lowercase_ , noise=lowercase_ )
lowercase__ : Any = model(**lowercase_ , noise=lowercase_ )
self.assert_outputs_same(lowercase_ , lowercase_ )
def __UpperCamelCase ( self : Any , lowercase_ : Any , lowercase_ : List[str] , lowercase_ : List[str] ) -> Tuple:
# make masks reproducible
np.random.seed(2 )
lowercase__ : Dict = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
lowercase__ : str = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowercase__ : Optional[Any] = tf.constant(lowercase_ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
lowercase__ : str = tf_noise
super().check_pt_tf_models(lowercase_ , lowercase_ , lowercase_ )
def __UpperCamelCase ( self : str ) -> Optional[Any]:
# make mask reproducible
np.random.seed(2 )
lowercase__ , lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : str = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(lowercase_ )
if module_member_name.endswith("MainLayer" )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len("MainLayer" )] == model_class.__name__[: -len("Model" )]
for module_member in (getattr(lowercase_ , lowercase_ ),)
if isinstance(lowercase_ , lowercase_ )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(lowercase_ , "_keras_serializable" , lowercase_ )
}
lowercase__ : Dict = int((config.image_size // config.patch_size) ** 2 )
lowercase__ : List[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowercase__ : List[str] = tf.convert_to_tensor(lowercase_ )
inputs_dict.update({"noise": noise} )
for main_layer_class in tf_main_layer_classes:
lowercase__ : str = main_layer_class(lowercase_ )
lowercase__ : Optional[int] = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
lowercase__ : Any = tf.keras.Model(lowercase_ , outputs=main_layer(lowercase_ ) )
lowercase__ : Union[str, Any] = model(lowercase_ )
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase__ : Tuple = os.path.join(lowercase_ , "keras_model.h5" )
model.save(lowercase_ )
lowercase__ : Tuple = tf.keras.models.load_model(
lowercase_ , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(lowercase_ , tf.keras.Model )
lowercase__ : Dict = model(lowercase_ )
self.assert_outputs_same(lowercase_ , lowercase_ )
@slow
def __UpperCamelCase ( self : Optional[int] ) -> Optional[Any]:
# make mask reproducible
np.random.seed(2 )
lowercase__ , lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Tuple = int((config.image_size // config.patch_size) ** 2 )
lowercase__ : int = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowercase__ : Dict = model_class(lowercase_ )
lowercase__ : Optional[int] = self._prepare_for_class(lowercase_ , lowercase_ )
lowercase__ : Union[str, Any] = model(lowercase_ , noise=lowercase_ )
if model_class.__name__ == "TFViTMAEModel":
lowercase__ : str = outputs.last_hidden_state.numpy()
lowercase__ : List[Any] = 0
else:
lowercase__ : str = outputs.logits.numpy()
lowercase__ : Any = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowercase_ , saved_model=lowercase_ )
lowercase__ : Dict = model_class.from_pretrained(lowercase_ )
lowercase__ : Optional[int] = model(lowercase_ , noise=lowercase_ )
if model_class.__name__ == "TFViTMAEModel":
lowercase__ : Optional[Any] = after_outputs["last_hidden_state"].numpy()
lowercase__ : str = 0
else:
lowercase__ : str = after_outputs["logits"].numpy()
lowercase__ : Dict = 0
lowercase__ : List[str] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowercase_ , 1E-5 )
def __UpperCamelCase ( self : Union[str, Any] ) -> Any:
# make mask reproducible
np.random.seed(2 )
lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Optional[Any] = int((config.image_size // config.patch_size) ** 2 )
lowercase__ : Union[str, Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowercase__ : Optional[Any] = model_class(lowercase_ )
lowercase__ : Optional[int] = self._prepare_for_class(lowercase_ , lowercase_ )
lowercase__ : Optional[int] = model(lowercase_ , noise=lowercase_ )
lowercase__ : Any = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(lowercase_ )
lowercase__ : str = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
lowercase__ : Dict = model_class.from_config(model.config )
lowercase__ : Optional[int] = new_model(lowercase_ ) # Build model
new_model.set_weights(model.get_weights() )
lowercase__ : Tuple = new_model(lowercase_ , noise=lowercase_ )
self.assert_outputs_same(lowercase_ , lowercase_ )
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def __UpperCamelCase ( self : Dict ) -> Tuple:
pass
@unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" )
def __UpperCamelCase ( self : Dict ) -> List[str]:
pass
@slow
def __UpperCamelCase ( self : Dict ) -> List[Any]:
lowercase__ : Tuple = TFViTMAEModel.from_pretrained("google/vit-base-patch16-224" )
self.assertIsNotNone(lowercase_ )
def prepare_img():
    """Load the COCO fixture image used by the integration test below.

    Renamed from the mangled `lowercase_`: the integration test calls
    `prepare_img()` by this name.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class snake_case_(unittest.TestCase):
    """Integration test running the pretrained ViT-MAE checkpoint end to end."""

    @cached_property
    def default_image_processor(self):
        # Named per the lookup `self.default_image_processor` in the test below.
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self) -> None:
        """Forward the fixture image and compare logits against recorded values."""
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)
        model = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))
        # forward pass
        outputs = model(**inputs, noise=noise)
        # verify the logits
        expected_shape = tf.convert_to_tensor([1, 196, 768])
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.convert_to_tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )
        tf.debugging.assert_near(outputs.logits[0, :3, :3], expected_slice, atol=1e-4)
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class snake_case_(unittest.TestCase):
    """Tests for the zero-shot audio-classification pipeline.

    Locals restored from usage: the mangled bodies passed the undefined name
    `lowercase_` to the classifier; the intended value is the ESC-50 audio array.
    """

    @require_torch
    def test_small_model_pt(self) -> None:
        audio_classifier = pipeline(
            task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused"
        )
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}],
        )

    @unittest.skip("No models are available in TF")
    def test_small_model_tf(self) -> None:
        pass

    @slow
    @require_torch
    def test_large_model_pt(self) -> None:
        audio_classifier = pipeline(
            task="zero-shot-audio-classification",
            model="laion/clap-htsat-unfused",
        )
        # This is an audio of a dog
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.999, "label": "Sound of a dog"},
                {"score": 0.001, "label": "Sound of vaccum cleaner"},
            ],
        )
        output = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )
        output = audio_classifier(
            [audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"], batch_size=5
        )
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

    @unittest.skip("No models are available in TF")
    def test_large_model_tf(self) -> None:
        pass
# flake8: noqa
# Lint as: python3
# Declared before the imports on purpose: this list is the package's public API
# (restored to `__all__`; the mangled original bound it to a throwaway name).
__all__ = [
    "VerificationMode",
    "Version",
    "disable_progress_bar",
    "enable_progress_bar",
    "is_progress_bar_enabled",
    "experimental",
]

from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
import operator
def lowercase_ ( _lowerCamelCase : list , _lowerCamelCase : bool = False , _lowerCamelCase : list | None = None):
lowercase__ : int = operator.lt if reverse else operator.gt
lowercase__ : str = solution or []
if not arr:
return solution
lowercase__ : List[str] = [arr.pop(0)]
for i, item in enumerate(_lowerCamelCase):
if _operator(_lowerCamelCase , sublist[-1]):
sublist.append(_lowerCamelCase)
arr.pop(_lowerCamelCase)
# merging sublist into solution list
if not solution:
solution.extend(_lowerCamelCase)
else:
while sublist:
lowercase__ : str = sublist.pop(0)
for i, xx in enumerate(_lowerCamelCase):
if not _operator(_lowerCamelCase , _lowerCamelCase):
solution.insert(_lowerCamelCase , _lowerCamelCase)
break
else:
solution.append(_lowerCamelCase)
strand_sort(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
# Generic vertex type: the class below subscripts `Generic[T]`, so this must
# be bound to the name `T` (the mangled original bound it to `UpperCamelCase`).
T = TypeVar("T")
UpperCamelCase = T  # keep the old binding in case anything referenced it
class GraphAdjacencyList(Generic[T]):
    """Directed or undirected graph backed by an adjacency list (dict of lists).

    Restored from the mangled original: `__init__` bound its state to throwaway
    locals (so `add_edge` raised AttributeError), `add_edge` declared two
    parameters with the same name (SyntaxError), and the class name is taken
    from the original return annotation `GraphAdjacencyList[T]`.
    """

    def __init__(self, directed: bool = True) -> None:
        """Create an empty graph.

        :param directed: when False, every edge is mirrored in both directions.
        """
        self.adj_list: dict[T, list[T]] = {}  # vertex -> list of adjacent vertices
        self.directed = directed

    def add_edge(self, source_vertex: T, destination_vertex: T) -> GraphAdjacencyList[T]:
        """Connect `source_vertex` to `destination_vertex`; returns self for chaining."""
        if not self.directed:  # For undirected graphs
            # Both endpoints already present: record the adjacency both ways.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
            # Only the source exists: append to it, then create the destination
            # with the source as its first neighbour.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
            # Only the destination exists: append to it, then create the source.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
            # Neither exists: create both entries pointing at each other.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # Both endpoints present: only the forward edge is recorded.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
            # Only the source exists: append, and create an empty destination entry.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
            # Only the destination exists: create the source entry.
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # Neither exists: create the source entry and an empty destination.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []
        return self

    def __repr__(self) -> str:
        return pformat(self.adj_list)
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
# Module logger: `MaxLengthCriteria.__call__` below reads the name `logger`,
# so this binding is restored (the mangled original bound it to `UpperCamelCase`
# and then immediately rebound `UpperCamelCase` to the docstring).
logger = logging.get_logger(__name__)

# Shared `Args:` documentation injected into each criterion's `__call__`
# through the `add_start_docstrings` decorator.
STOPPING_CRITERIA_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
            Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
            or scores for each vocabulary token after SoftMax.
        kwargs (`Dict[str, Any]`, *optional*):
            Additional stopping criteria specific kwargs.

    Return:
        `bool`. `False` indicates we should continue, `True` indicates we should stop.
"""
class StoppingCriteria(ABC):
    """Abstract base class for all stopping criteria applied during generation.

    Name and base restored: the sibling `StoppingCriteriaList.max_length` and
    the validation helper below reference the criteria classes by these names,
    and `ABC` is imported at the top of this module.
    """

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        raise NotImplementedError("StoppingCriteria needs to be subclassed")
class MaxLengthCriteria(StoppingCriteria):
    """Stop generation once the sequence length reaches ``max_length``.

    Optionally warns (once) when generation is about to exceed the model's
    ``max_position_embeddings``.
    """

    def __init__(self, max_length: int, max_position_embeddings: Optional[int] = None) -> None:
        self.max_length = max_length
        self.max_position_embeddings = max_position_embeddings

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        cur_len = input_ids.shape[-1]
        is_done = cur_len >= self.max_length
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            logger.warning_once(
                "This is a friendly reminder - the current text generation call will exceed the model's predefined "
                f"maximum length ({self.max_position_embeddings}). Depending on the model, you may observe "
                "exceptions, performance degradation, or nothing at all."
            )
        return is_done
class MaxNewTokensCriteria(StoppingCriteria):
    """Deprecated: stop once ``start_length + max_new_tokens`` tokens exist."""

    def __init__(self, start_length: int, max_new_tokens: int) -> None:
        # The deprecation message itself spells out the replacement.
        warnings.warn(
            "The class `MaxNewTokensCriteria` is deprecated. "
            f"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` "
            "with `max_length = start_length + max_new_tokens` instead.",
            FutureWarning,  # NOTE(review): category restored from the mangled placeholder — confirm upstream
        )
        self.start_length = start_length
        self.max_new_tokens = max_new_tokens
        self.max_length = start_length + max_new_tokens

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return input_ids.shape[-1] >= self.max_length
class MaxTimeCriteria(StoppingCriteria):
    """Stop generation once ``max_time`` seconds have elapsed.

    The clock starts at construction unless ``initial_timestamp`` is given.
    """

    def __init__(self, max_time: float, initial_timestamp: Optional[float] = None) -> None:
        self.max_time = max_time
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return time.time() - self.initial_timestamp > self.max_time
class StoppingCriteriaList(list):
    """A ``list`` of criteria; generation stops when any member fires.

    Base restored to ``list``: ``__call__`` iterates ``self`` directly.
    """

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return any(criteria(input_ids, scores) for criteria in self)

    @property
    def max_length(self) -> Optional[int]:
        # Named `max_length`: the validation helper below reads
        # `stopping_criteria.max_length`.
        for stopping_criterium in self:
            if isinstance(stopping_criterium, MaxLengthCriteria):
                return stopping_criterium.max_length
            elif isinstance(stopping_criterium, MaxNewTokensCriteria):
                return stopping_criterium.max_length
        return None
def lowercase_(stopping_criteria: StoppingCriteriaList, max_length: int) -> StoppingCriteriaList:
    """Return a copy of ``stopping_criteria`` guaranteed to bound the length.

    Warns if the list already carries a different ``max_length``; otherwise
    appends a ``MaxLengthCriteria``.  Parameter names restored from the body
    (the mangled def declared two parameters with the same name), and the
    ``deepcopy`` result is bound to ``new_stopping_criteria`` as the later
    lines require.
    """
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria)
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter", UserWarning)
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length))
    return new_stopping_criteria
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class snake_case_(unittest.TestCase):
    """Tests for the zero-shot audio-classification pipeline.

    Locals restored from usage: the mangled bodies passed the undefined name
    `lowercase_` to the classifier; the intended value is the ESC-50 audio array.
    """

    @require_torch
    def test_small_model_pt(self) -> None:
        audio_classifier = pipeline(
            task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused"
        )
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}],
        )

    @unittest.skip("No models are available in TF")
    def test_small_model_tf(self) -> None:
        pass

    @slow
    @require_torch
    def test_large_model_pt(self) -> None:
        audio_classifier = pipeline(
            task="zero-shot-audio-classification",
            model="laion/clap-htsat-unfused",
        )
        # This is an audio of a dog
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.999, "label": "Sound of a dog"},
                {"score": 0.001, "label": "Sound of vaccum cleaner"},
            ],
        )
        output = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )
        output = audio_classifier(
            [audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"], batch_size=5
        )
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

    @unittest.skip("No models are available in TF")
    def test_large_model_tf(self) -> None:
        pass
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
# NOTE(review): mangled copy of an AlphaFold-style `make_atom14_masks`.  Left
# byte-identical because several collapses are unrecoverable from this file
# alone; the following must be restored from the upstream source:
#   * the three accumulators are bound to the throwaway name `lowercase__`,
#     yet the loop appends to `restype_atomaa_to_atomaa_list` /
#     `restype_atomaa_mask_list`, which are never defined -> NameError;
#   * two distinct tables (atom14->atom37 and atom37->atom14) were collapsed
#     into the single name `restype_atomaa_to_atomaa_list` (see the `[0] * 14`
#     vs `[0] * 37` dummy rows below);
#   * `torch.intaa` / `torch.floataa` do not exist — presumably torch.int32 /
#     torch.float32, TODO confirm;
#   * the parameter is declared `_lowerCamelCase` but the body reads
#     `protein` -> NameError;
#   * `def lowercase_` is immediately shadowed by the next definition, and
#     the sibling wrapper calls this function as `make_atomaa_masks`.
def lowercase_ ( _lowerCamelCase : Dict[str, torch.Tensor]):
    # Per-restype lookup tables, built once from residue constants.
    lowercase__ : Any = []
    lowercase__ : Optional[int] = []
    lowercase__ : Tuple = []
    for rt in rc.restypes:
        # Atom names occupying the 14 dense slots for this residue type.
        lowercase__ : Dict = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
        restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        lowercase__ : str = {name: i for i, name in enumerate(_lowerCamelCase)}
        restype_atomaa_to_atomaa_list.append(
            [(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types])
        restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names])
    # Add dummy mapping for restype 'UNK'
    restype_atomaa_to_atomaa_list.append([0] * 14)
    restype_atomaa_to_atomaa_list.append([0] * 37)
    restype_atomaa_mask_list.append([0.0] * 14)
    # Materialise the tables on the same device as the input aatype tensor.
    lowercase__ : Union[str, Any] = torch.tensor(
        _lowerCamelCase , dtype=torch.intaa , device=protein["aatype"].device , )
    lowercase__ : str = torch.tensor(
        _lowerCamelCase , dtype=torch.intaa , device=protein["aatype"].device , )
    lowercase__ : List[str] = torch.tensor(
        _lowerCamelCase , dtype=torch.floataa , device=protein["aatype"].device , )
    lowercase__ : str = protein["aatype"].to(torch.long)
    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    lowercase__ : Dict = restype_atomaa_to_atomaa[protein_aatype]
    lowercase__ : str = restype_atomaa_mask[protein_aatype]
    lowercase__ : List[Any] = residx_atomaa_mask
    lowercase__ : Optional[Any] = residx_atomaa_to_atomaa.long()
    # create the gather indices for mapping back
    lowercase__ : str = restype_atomaa_to_atomaa[protein_aatype]
    lowercase__ : str = residx_atomaa_to_atomaa.long()
    # create the corresponding mask
    # (21, 37) existence mask over the sparse atom37 layout, one row per restype.
    lowercase__ : Optional[Any] = torch.zeros([21, 37] , dtype=torch.floataa , device=protein["aatype"].device)
    for restype, restype_letter in enumerate(rc.restypes):
        lowercase__ : Tuple = rc.restype_atoa[restype_letter]
        lowercase__ : List[Any] = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            lowercase__ : Optional[int] = rc.atom_order[atom_name]
            lowercase__ : Tuple = 1
    lowercase__ : Dict = restype_atomaa_mask[protein_aatype]
    lowercase__ : Any = residx_atomaa_mask
    return protein
def lowercase_(batch: Dict[str, torch.Tensor]) -> Dict[str, np.ndarray]:
    """NumPy front-end: tensorise ``batch``, run the mask builder, return arrays.

    The parameter is renamed to ``batch``: the original declared
    ``_lowerCamelCase`` but the body reads ``batch["aatype"]`` (NameError).

    NOTE(review): ``make_atomaa_masks`` is not defined under that name in this
    file as written (the tensor version above is named ``lowercase_`` and is
    shadowed by this def) — the module-level names need restoring together.
    """
    tensor_batch = tree_map(lambda x: torch.tensor(x, device=batch["aatype"].device), batch, np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t), make_atomaa_masks(tensor_batch))
    return out
# Spelled-out metric length unit -> SI symbol.  Restored name: the conversion
# function below reads `UNIT_SYMBOL` (the mangled module bound both tables to
# the same throwaway name, so the first one was lost entirely).
UNIT_SYMBOL = {
    "meter": "m",
    "kilometer": "km",
    "megametre": "Mm",
    "gigametre": "Gm",
    "terametre": "Tm",
    "petametre": "Pm",
    "exametre": "Em",
    "zettametre": "Zm",
    "yottametre": "Ym",
}

# Exponent of the factor(meter)
METRIC_CONVERSION = {
    "m": 0,
    "km": 3,
    "Mm": 6,
    "Gm": 9,
    "Tm": 12,
    "Pm": 15,
    "Em": 18,
    "Zm": 21,
    "Ym": 24,
}

# Preserve the old (mangled) module name; it was last bound to the exponents.
UpperCamelCase = METRIC_CONVERSION


def lowercase_(value: float, from_type: str, to_type: str) -> float:
    """Convert a length ``value`` between metric units.

    ``from_type`` / ``to_type`` accept spelled-out names (singular or plural,
    any case) or SI symbols.  Parameter names restored from the body (the
    mangled def declared three identically-named parameters: a SyntaxError).

    :raises ValueError: if either unit is unknown.
    """
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")
    # Map spelled-out names to symbols; symbols pass through unchanged.
    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)
    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    # Both of the original's branches reduce to this single difference.
    exponent = from_exponent - to_exponent
    return value * pow(10, exponent)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class FlaxBigBirdModelTester(unittest.TestCase):
    """Builds a tiny BigBird config plus synthetic inputs for the tests below.

    Class name restored: the test class's setUp instantiates
    `FlaxBigBirdModelTester(self)`.  `__init__` parameter names restored from
    the attribute-assignment order in the body (the mangled def declared every
    parameter as `lowercase_`, a SyntaxError).
    """

    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=56,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=2,
        intermediate_size=7,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=2,
        num_random_blocks=3,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, token_type_ids, attention_mask); names
        restored from the original return statement."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = BigBirdConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,  # NOTE(review): restored from the mangled placeholder — confirm upstream
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
            block_size=self.block_size,
            num_random_blocks=self.num_random_blocks,
            use_bias=self.use_bias,
            rescale_embeddings=self.rescale_embeddings,
        )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) as used by the common test mixin."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_flax
class snake_case_(FlaxModelTesterMixin, unittest.TestCase):
    """Flax BigBird model tests; slow overrides mirror the common mixin.

    Base class restored to `FlaxModelTesterMixin` (imported at module top);
    method names restored from the `super().test_*()` calls in the bodies.
    """

    all_model_classes = (
        (
            FlaxBigBirdForCausalLM,
            FlaxBigBirdModel,
            FlaxBigBirdForPreTraining,
            FlaxBigBirdForMaskedLM,
            FlaxBigBirdForMultipleChoice,
            FlaxBigBirdForQuestionAnswering,
            FlaxBigBirdForSequenceClassification,
            FlaxBigBirdForTokenClassification,
        )
        if is_flax_available()
        else ()
    )
    test_attn_probs = False  # read by test_attention_outputs below
    # NOTE(review): the second mangled False flag is presumed to be
    # `test_mismatched_shapes` — confirm against upstream.
    test_mismatched_shapes = False

    def setUp(self) -> None:
        self.model_tester = FlaxBigBirdModelTester(self)

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_save_pretrained(self):
        super().test_from_pretrained_save_pretrained()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_with_no_automatic_init(self):
        super().test_from_pretrained_with_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_no_automatic_init(self):
        super().test_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_hidden_states_output(self):
        super().test_hidden_states_output()

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/bigbird-roberta-base")
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        # Block-sparse attention does not expose attention probs; only run the
        # common check when the flag is enabled.
        if self.test_attn_probs:
            super().test_attention_outputs()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(input_ids, attention_mask=None, **kwargs):
                    return model(input_ids=input_ids, attention_mask=attention_mask, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None):
        # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version,
        # an effort was done to return `attention_probs` (yet to be verified).
        if name.startswith("outputs.attentions"):
            return
        else:
            super().check_pt_flax_outputs(fx_outputs, pt_outputs, model_class, tol, name, attributes)
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def lowercase_(method):
    """Decorate a forward-style method so accelerate's CPU-offload hook runs first.

    If the wrapped object carries an accelerate ``_hf_hook`` with a
    ``pre_forward`` callback, it is invoked (moving weights onto the execution
    device) before ``method`` runs.  When accelerate is missing, or older than
    0.17.0 (which lacks this hook API), ``method`` is returned unchanged.

    NOTE(review): original parameter was named ``_lowerCamelCase`` while the
    body referenced ``method``, and the inner wrapper declared duplicate
    ``*``/``**`` names — both restored here.
    """
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__).base_version
    if version.parse(accelerate_version) < version.parse("0.17.0"):
        return method

    import functools

    @functools.wraps(method)  # keep the wrapped method's name/docstring for introspection
    def wrapper(self, *args, **kwargs):
        if hasattr(self, "_hf_hook") and hasattr(self._hf_hook, "pre_forward"):
            self._hf_hook.pre_forward(self)
        return method(self, *args, **kwargs)

    return wrapper
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
# Lazy-import structure for GroupViT: maps each submodule to the public names it
# defines.  NOTE(review): the original assigned this dict (and the conditional
# model lists) to `UpperCamelCase`, leaving `_import_structure` undefined at the
# `_LazyModule(...)` call below — a NameError at import time.
_import_structure = {
    "configuration_groupvit": [
        "GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "GroupViTConfig",
        "GroupViTOnnxConfig",
        "GroupViTTextConfig",
        "GroupViTVisionConfig",
    ],
}

# PyTorch implementation is only exported when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_groupvit"] = [
        "GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GroupViTModel",
        "GroupViTPreTrainedModel",
        "GroupViTTextModel",
        "GroupViTVisionModel",
    ]

# TensorFlow implementation is only exported when TF is installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_groupvit"] = [
        "TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFGroupViTModel",
        "TFGroupViTPreTrainedModel",
        "TFGroupViTTextModel",
        "TFGroupViTVisionModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_groupvit import (
        GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        GroupViTConfig,
        GroupViTOnnxConfig,
        GroupViTTextConfig,
        GroupViTVisionConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_groupvit import (
            GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GroupViTModel,
            GroupViTPreTrainedModel,
            GroupViTTextModel,
            GroupViTVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_groupvit import (
            TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFGroupViTModel,
            TFGroupViTPreTrainedModel,
            TFGroupViTTextModel,
            TFGroupViTVisionModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy frameworks are only
    # imported on first attribute access.
    UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
    sys.modules[__name__] = UpperCamelCase
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure for M-CTC-T.  NOTE(review): the original assigned this
# dict (and the torch-only model list) to `UpperCamelCase`, leaving
# `_import_structure` undefined at the `_LazyModule(...)` call — a NameError.
_import_structure = {
    "configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
    "feature_extraction_mctct": ["MCTCTFeatureExtractor"],
    "processing_mctct": ["MCTCTProcessor"],
}

# Model classes are only exported when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mctct"] = [
        "MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MCTCTForCTC",
        "MCTCTModel",
        "MCTCTPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
    from .feature_extraction_mctct import MCTCTFeatureExtractor
    from .processing_mctct import MCTCTProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel

else:
    import sys

    # Replace this module with a lazy proxy so torch is only imported on demand.
    UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
    sys.modules[__name__] = UpperCamelCase
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_json_dataset(dataset, expected_features):
    """Shared assertions for a `Dataset` read from the 4-row / 3-column JSON fixture.

    NOTE(review): the original signature had two parameters both named
    `_lowerCamelCase` (a SyntaxError) and the def was renamed away from
    `_check_json_dataset`, which is the name every call site below uses.
    """
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    """Reading a JSON-lines file should only grow Arrow memory when keep_in_memory=True."""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_json_features(features, jsonl_path, tmp_path):
    """Explicit `features` (or None for inference) must be honored by the reader."""
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_path, features=features, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_3": "float64", "col_1": "string", "col_2": "int64"},
    ],
)
def test_dataset_from_json_with_unsorted_column_names(features, jsonl_312_path, tmp_path):
    """Column order from the file (col_3, col_1, col_2) must be preserved."""
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
def test_dataset_from_json_with_mismatched_features(jsonl_312_path, tmp_path):
    """Requested feature order (col_2, col_3, col_1) overrides the file's column order."""
    # jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    features = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
    expected_features = features.copy()
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    cache_dir = tmp_path / "cache"
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_json_split(split, jsonl_path, tmp_path):
    """The resulting dataset's split defaults to "train" when no split is given."""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, split=split).read()
    _check_json_dataset(dataset, expected_features)
    # NOTE(review): the original `assert dataset.split == split if split else "train"`
    # parsed as `(dataset.split == split) if split else "train"`, which is always
    # truthy for split=None; parenthesized to assert what was intended.
    assert dataset.split == (split if split else "train")
@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_json_path_type(path_type, jsonl_path, tmp_path):
    """The reader accepts a single path or a list of paths."""
    if issubclass(path_type, str):
        path = jsonl_path
    elif issubclass(path_type, list):
        path = [jsonl_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)
def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
    """Shared assertions for a `DatasetDict` read from the 4-row / 3-column JSON fixture.

    NOTE(review): original def had three parameters all named `_lowerCamelCase`
    (a SyntaxError); names restored to match the `_check_json_datasetdict`
    call sites below.
    """
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    """Reading a {"train": path} mapping yields a DatasetDict; memory grows only when kept in memory."""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader({"train": jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_datasetdict_from_json_features(features, jsonl_path, tmp_path):
    """Explicit `features` (or None for inference) must be honored for DatasetDict reads."""
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader({"train": jsonl_path}, features=features, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_json_splits(split, jsonl_path, tmp_path):
    """Each key of the path mapping becomes a split of the DatasetDict."""
    if split:
        path = {split: jsonl_path}
    else:
        split = "train"
        path = {"train": jsonl_path, "test": jsonl_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def load_json(buffer):
    """Parse a whole JSON document from an open file-like object.

    NOTE(review): def was renamed away from `load_json`, the name used in the
    `parametrize` tables of the writer tests below; restored.
    """
    return json.load(buffer)
def load_json_lines(buffer):
    """Parse a JSON-lines stream: one JSON document per line.

    NOTE(review): the original parsed `_lowerCamelCase` (the whole buffer) on
    every iteration instead of each `line`; fixed, and the def renamed back to
    `load_json_lines` as used by the writer tests below.
    """
    return [json.loads(line) for line in buffer]
class snake_case_ :
    """End-to-end tests for `JsonDatasetWriter` (dataset fixture -> JSON bytes).

    NOTE(review): every method in the original was named `__UpperCamelCase`, so
    each definition shadowed the previous one and pytest collected none of them.
    Unique `test_*` names are restored; bodies are unchanged apart from the
    de-mangled local/parameter names.
    """

    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines_multiproc(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient_multiproc(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    def test_dataset_to_json_orient_invalidproc(self, dataset):
        # NOTE(review): the expected exception type was mangled in the source;
        # ValueError matches JsonDatasetWriter's num_proc validation — confirm.
        with pytest.raises(ValueError):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(dataset, buffer, num_proc=0)

    @pytest.mark.parametrize("compression, extension", [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")])
    def test_dataset_to_json_compression(self, shared_datadir, tmp_path_factory, extension, compression, dataset):
        path = tmp_path_factory.mktemp("data") / f"test.json.{extension}"
        original_path = str(shared_datadir / f"test_file.json.{extension}")
        JsonDatasetWriter(dataset, path, compression=compression).write()
        with fsspec.open(path, "rb", compression="infer") as f:
            exported_content = f.read()
        with fsspec.open(original_path, "rb", compression="infer") as f:
            original_content = f.read()
        assert exported_content == original_content
import random
from typing import Any
def lowercase_ ( _lowerCamelCase : list):
for _ in range(len(_lowerCamelCase)):
lowercase__ : Dict = random.randint(0 , len(_lowerCamelCase) - 1)
lowercase__ : Union[str, Any] = random.randint(0 , len(_lowerCamelCase) - 1)
lowercase__ , lowercase__ : Union[str, Any] = data[b], data[a]
return data
if __name__ == "__main__":
UpperCamelCase = [0, 1, 2, 3, 4, 5, 6, 7]
UpperCamelCase = ['''python''', '''says''', '''hello''', '''!''']
print('''Fisher-Yates Shuffle:''')
print('''List''', integers, strings)
print('''FY Shuffle''', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class snake_case_ ( __A ):
    r"""
    Combines a LayoutLMv3 image processor and a LayoutLMv3 tokenizer into a single
    processor: the image processor (optionally running OCR) produces pixel values,
    words and boxes, which are then fed to the tokenizer.

    NOTE(review): the original declared all three class attributes as `__A` and
    several methods as `__UpperCamelCase`, so later definitions shadowed earlier
    ones and `self.get_overflowing_images` (called in `__call__`) did not exist.
    Names are restored to the `ProcessorMixin` conventions the bodies rely on.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        # `feature_extractor` is the deprecated alias for `image_processor`.
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """Run the image processor then the tokenizer and return one batch encoding."""
        # verify input: user-provided boxes/word labels conflict with built-in OCR
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class snake_case_ ( __A ):
def __init__( self : List[Any] , *lowercase_ : List[Any] , lowercase_ : Union[str, Any]=None , lowercase_ : Tuple=None , **lowercase_ : str ) -> Union[str, Any]:
super().__init__(*lowercase_ , **lowercase_ )
lowercase__ : Tuple = eval_examples
lowercase__ : Optional[int] = post_process_function
def __UpperCamelCase ( self : List[Any] , lowercase_ : Optional[Dataset] = None , lowercase_ : Any=None , lowercase_ : Optional[List[str]] = None , lowercase_ : str = "eval" , **lowercase_ : Any , ) -> Dict[str, float]:
lowercase__ : List[Any] = gen_kwargs.copy()
lowercase__ : Optional[int] = (
gen_kwargs["max_length"] if gen_kwargs.get("max_length" ) is not None else self.args.generation_max_length
)
lowercase__ : List[str] = (
gen_kwargs["num_beams"] if gen_kwargs.get("num_beams" ) is not None else self.args.generation_num_beams
)
lowercase__ : Tuple = gen_kwargs
lowercase__ : Tuple = self.eval_dataset if eval_dataset is None else eval_dataset
lowercase__ : str = self.get_eval_dataloader(lowercase_ )
lowercase__ : Any = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
lowercase__ : List[str] = self.compute_metrics
lowercase__ : str = None
lowercase__ : Any = time.time()
lowercase__ : List[Any] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
lowercase__ : Optional[int] = eval_loop(
lowercase_ , description="Evaluation" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase_ , metric_key_prefix=lowercase_ , )
finally:
lowercase__ : int = compute_metrics
lowercase__ : Tuple = self.args.eval_batch_size * self.args.world_size
if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
lowercase_ , lowercase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
lowercase__ : Optional[int] = self.post_process_function(lowercase_ , lowercase_ , lowercase_ )
lowercase__ : str = self.compute_metrics(lowercase_ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
lowercase__ : List[str] = metrics.pop(lowercase_ )
metrics.update(output.metrics )
else:
lowercase__ : Union[str, Any] = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(lowercase_ )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
lowercase__ : List[str] = self.callback_handler.on_evaluate(self.args , self.state , self.control , lowercase_ )
return metrics
def __UpperCamelCase ( self : str , lowercase_ : str , lowercase_ : List[Any] , lowercase_ : Optional[int]=None , lowercase_ : str = "test" , **lowercase_ : Any ) -> Tuple:
lowercase__ : Any = gen_kwargs.copy()
lowercase__ : Tuple = self.get_test_dataloader(lowercase_ )
# Temporarily disable metric computation, we will do it in the loop here.
lowercase__ : List[Any] = self.compute_metrics
lowercase__ : Optional[Any] = None
lowercase__ : Any = time.time()
lowercase__ : Optional[int] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
lowercase__ : Dict = eval_loop(
lowercase_ , description="Prediction" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase_ , metric_key_prefix=lowercase_ , )
finally:
lowercase__ : Optional[int] = compute_metrics
lowercase__ : Union[str, Any] = self.args.eval_batch_size * self.args.world_size
if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
lowercase_ , lowercase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
lowercase__ : int = self.post_process_function(lowercase_ , lowercase_ , lowercase_ , "predict" )
lowercase__ : List[Any] = self.compute_metrics(lowercase_ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
lowercase__ : Tuple = metrics.pop(lowercase_ )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=lowercase_ )
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
UpperCamelCase = logging.get_logger(__name__)
if is_vision_available():
import PIL
class snake_case_ ( __A ):
__A : str = ["pixel_values"]
def __init__( self : int , lowercase_ : bool = True , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = PILImageResampling.BICUBIC , lowercase_ : bool = True , lowercase_ : Dict[str, int] = None , lowercase_ : bool = True , lowercase_ : Union[int, float] = 1 / 2_55 , lowercase_ : bool = True , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : bool = True , **lowercase_ : Union[str, Any] , ) -> None:
super().__init__(**lowercase_ )
lowercase__ : Tuple = size if size is not None else {"shortest_edge": 2_24}
lowercase__ : Tuple = get_size_dict(lowercase_ , default_to_square=lowercase_ )
lowercase__ : List[str] = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
lowercase__ : Tuple = get_size_dict(lowercase_ , default_to_square=lowercase_ , param_name="crop_size" )
lowercase__ : Dict = do_resize
lowercase__ : List[Any] = size
lowercase__ : int = resample
lowercase__ : Union[str, Any] = do_center_crop
lowercase__ : Optional[int] = crop_size
lowercase__ : List[str] = do_rescale
lowercase__ : int = rescale_factor
lowercase__ : List[Any] = do_normalize
lowercase__ : Union[str, Any] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
lowercase__ : str = image_std if image_std is not None else OPENAI_CLIP_STD
lowercase__ : Dict = do_convert_rgb
def __UpperCamelCase ( self : Optional[Any] , lowercase_ : np.ndarray , lowercase_ : Dict[str, int] , lowercase_ : PILImageResampling = PILImageResampling.BICUBIC , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Union[str, Any] , ) -> np.ndarray:
lowercase__ : str = get_size_dict(lowercase_ , default_to_square=lowercase_ )
if "shortest_edge" not in size:
raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
lowercase__ : Dict = get_resize_output_image_size(lowercase_ , size=size["shortest_edge"] , default_to_square=lowercase_ )
return resize(lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_ )
def __UpperCamelCase ( self : int , lowercase_ : np.ndarray , lowercase_ : Dict[str, int] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : int , ) -> np.ndarray:
lowercase__ : Optional[Any] = get_size_dict(lowercase_ )
if "height" not in size or "width" not in size:
raise ValueError(F'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(lowercase_ , size=(size["height"], size["width"]) , data_format=lowercase_ , **lowercase_ )
def __UpperCamelCase ( self : Optional[Any] , lowercase_ : np.ndarray , lowercase_ : Union[int, float] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Optional[Any] , ) -> Any:
return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_ )
def __UpperCamelCase ( self : str , lowercase_ : np.ndarray , lowercase_ : Union[float, List[float]] , lowercase_ : Union[float, List[float]] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : str , ) -> np.ndarray:
return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_ )
    # NOTE(review): every parameter below is named `lowercase_`, making this
    # `def` a SyntaxError (duplicate argument names), and the body reads names
    # (`do_resize`, `size`, `images`, ...) that are never bound because every
    # assignment target was mangled to `lowercase__`. The intended pipeline is
    # recognisably a CLIP-style `preprocess` (resolve defaults -> validate ->
    # convert_rgb -> to numpy -> resize -> center_crop -> rescale -> normalize
    # -> channel format -> BatchFeature); left byte-identical and flagged
    # because the argument-to-keyword mapping cannot be recovered with
    # certainty from this view.
    def __UpperCamelCase ( self : Optional[Any] , lowercase_ : ImageInput , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = None , lowercase_ : bool = None , lowercase_ : int = None , lowercase_ : bool = None , lowercase_ : float = None , lowercase_ : bool = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : bool = None , lowercase_ : Optional[Union[str, TensorType]] = None , lowercase_ : Optional[ChannelDimension] = ChannelDimension.FIRST , **lowercase_ : Union[str, Any] , ) -> PIL.Image.Image:
        # Resolve per-call options, falling back to the processor's defaults.
        lowercase__ : int = do_resize if do_resize is not None else self.do_resize
        lowercase__ : Dict = size if size is not None else self.size
        lowercase__ : List[Any] = get_size_dict(lowercase_ , param_name="size" , default_to_square=lowercase_ )
        lowercase__ : Dict = resample if resample is not None else self.resample
        lowercase__ : int = do_center_crop if do_center_crop is not None else self.do_center_crop
        lowercase__ : Dict = crop_size if crop_size is not None else self.crop_size
        lowercase__ : List[str] = get_size_dict(lowercase_ , param_name="crop_size" , default_to_square=lowercase_ )
        lowercase__ : int = do_rescale if do_rescale is not None else self.do_rescale
        lowercase__ : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor
        lowercase__ : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize
        lowercase__ : int = image_mean if image_mean is not None else self.image_mean
        lowercase__ : List[str] = image_std if image_std is not None else self.image_std
        lowercase__ : Union[str, Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        lowercase__ : Union[str, Any] = make_list_of_images(lowercase_ )
        # Validate inputs before doing any work.
        if not valid_images(lowercase_ ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True." )
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            lowercase__ : Dict = [convert_to_rgb(lowercase_ ) for image in images]
        # All transformations expect numpy arrays.
        lowercase__ : Optional[Any] = [to_numpy_array(lowercase_ ) for image in images]
        # Apply the enabled transforms in the canonical order.
        if do_resize:
            lowercase__ : List[Any] = [self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_ ) for image in images]
        if do_center_crop:
            lowercase__ : int = [self.center_crop(image=lowercase_ , size=lowercase_ ) for image in images]
        if do_rescale:
            lowercase__ : str = [self.rescale(image=lowercase_ , scale=lowercase_ ) for image in images]
        if do_normalize:
            lowercase__ : Optional[int] = [self.normalize(image=lowercase_ , mean=lowercase_ , std=lowercase_ ) for image in images]
        # Put channels first/last as requested and wrap in a BatchFeature.
        lowercase__ : Optional[Any] = [to_channel_dimension_format(lowercase_ , lowercase_ ) for image in images]
        lowercase__ : List[str] = {"pixel_values": images}
        return BatchFeature(data=lowercase_ , tensor_type=lowercase_ )
| 87 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
class snake_case_ ( PipelineTool ):
    """Text-summarization tool built on a BART model fine-tuned on SamSum.

    BUGFIX: every class attribute had been bound to the single name `__A`
    (each rebinding shadowed the previous one) and all three methods shared
    one name, so only the last survived. The attribute and method names
    required by the `PipelineTool` API (`default_checkpoint`, `description`,
    `name`, `pre_processor_class`, `model_class`, `inputs`, `outputs`,
    `encode`/`forward`/`decode`) are restored, and the boolean keyword
    arguments that had been replaced by the data arguments are set to True.
    """

    default_checkpoint = "philschmid/bart-large-cnn-samsum"
    description = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    name = "summarizer"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeqaSeqLM
    inputs = ["text"]
    outputs = ["text"]

    def encode( self , text ):
        # Tokenize the input, truncating to the model's maximum length.
        return self.pre_processor(text , return_tensors="pt" , truncation=True )

    def forward( self , inputs ):
        # Generate the summary token ids (single sequence).
        return self.model.generate(**inputs )[0]

    def decode( self , outputs ):
        # Convert generated ids back to clean, human-readable text.
        return self.pre_processor.decode(outputs , skip_special_tokens=True , clean_up_tokenization_spaces=True )
| 87 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
# Lazy-import plumbing for the GPT-Sw3 tokenizer: build the import-structure
# dict, then hand it to `_LazyModule` so the tokenizer module is only imported
# on first attribute access.
# BUGFIX: the structure must be a dict mapping submodule -> exported names;
# the previous code rebound the name to a bare list and then referenced an
# undefined `_import_structure` in the `_LazyModule` call (NameError).
UpperCamelCase = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # sentencepiece is optional; without it the tokenizer is simply not exposed.
    pass
else:
    UpperCamelCase["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        # BUGFIX: import made consistent with the exported name above
        # (the previous `.tokenization_gpt_swa` / `GPTSwaTokenizer` were mangled).
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
    import sys

    UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], UpperCamelCase, module_spec=__spec__)
| 87 | 1 |
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def lowercase_ ( tf_checkpoint_path : str , bert_config_file : str , pytorch_dump_path : str):
    """Convert a TensorFlow BERT checkpoint into a PyTorch ``BertForPreTraining`` checkpoint.

    Args:
        tf_checkpoint_path: path to the TensorFlow checkpoint.
        bert_config_file: JSON config describing the pre-trained model architecture.
        pytorch_dump_path: where to write the converted PyTorch state dict.

    BUGFIX: the previous signature repeated the parameter name `_lowerCamelCase`
    three times (a SyntaxError) and the body read names (`config`, `model`,
    `pytorch_dump_path`) that were never bound; distinct parameter and local
    names are restored.
    """
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f'''Building PyTorch model from configuration: {config}''')
    model = BertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model , config , tf_checkpoint_path)
    # Save pytorch-model
    print(f'''Save PyTorch model to {pytorch_dump_path}''')
    torch.save(model.state_dict() , pytorch_dump_path)
if __name__ == "__main__":
    # BUGFIX: the parser and parsed args were bound to `UpperCamelCase` while
    # later statements read `parser`/`args` (NameError), and the final call
    # targeted the undefined `convert_tf_checkpoint_to_pytorch` — the
    # conversion function in this module is `lowercase_`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
    )
    parser.add_argument(
        '''--bert_config_file''',
        default=None,
        type=str,
        required=True,
        help=(
            '''The config json file corresponding to the pre-trained BERT model. \n'''
            '''This specifies the model architecture.'''
        ),
    )
    parser.add_argument(
        '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    args = parser.parse_args()
    lowercase_(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 87 | UpperCamelCase = [0, 2, 4, 6, 8]
UpperCamelCase = [1, 3, 5, 7, 9]
# Local copies of the digit-parity tables: the module-level constants were both
# mangled to the same name (`UpperCamelCase`), so the second shadowed the first.
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def _reversible_numbers(remaining_length: int, remainder: int, digits: list[int], length: int) -> int:
    """Count length-`length` reversible numbers (Project Euler 145).

    A number n is reversible when every decimal digit of n + reverse(n) is odd.
    Digit pairs are chosen from the outside inward; `remainder` carries the
    running column carry, `remaining_length` is the number of positions still
    to fill, and `digits` is the scratch buffer of chosen digits.
    """
    if remaining_length == 0:
        # All digits chosen: reject leading/trailing zero, then verify the
        # remaining columns of n + reverse(n) (with carries) are all odd.
        if digits[0] == 0 or digits[-1] == 0:
            return 0
        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10
        return 1

    if remaining_length == 1:
        # Odd length: the middle digit contributes 2*digit (always even), so
        # the incoming carry must already be odd for the column to be odd.
        if remainder % 2 == 0:
            return 0
        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += _reversible_numbers(0, (remainder + 2 * digit) // 10, digits, length)
        return result

    result = 0
    for digit_high in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit_high
        # The paired digit must make the column sum odd.
        if (remainder + digit_high) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS
        for digit_low in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit_low
            result += _reversible_numbers(
                remaining_length - 2, (remainder + digit_high + digit_low) // 10, digits, length
            )
    return result


def lowercase_(_lowerCamelCase: int = 9) -> int:
    """Return how many reversible numbers exist below 10**`_lowerCamelCase`.

    BUGFIX: both functions in this snippet had been renamed to `lowercase_`
    (the second shadowed the first), the counting function's signature repeated
    one parameter name four times (a SyntaxError), and bodies read names
    (`remaining_length`, `result`, `max_power`, ...) that were never bound.
    The counter is restored as the private helper `_reversible_numbers` and
    this public entry point keeps the surviving name and signature.
    """
    result = 0
    for length in range(1, _lowerCamelCase + 1):
        result += _reversible_numbers(length, 0, [0] * length, length)
    return result


if __name__ == "__main__":
    print(f"{lowercase_() = }")
| 87 | 1 |
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
UpperCamelCase = logging.getLogger(__name__)
# NOTE(review): this PyTorch-Lightning NER module is heavily name-mangled:
# all six task methods share the name `__UpperCamelCase` (each def shadows the
# previous one), several signatures repeat the parameter name `lowercase_`
# (a SyntaxError), and locals are bound to `lowercase__` while later lines
# read the original names (`hparams`, `outputs`, `results`, ...). Presumably
# the methods were forward / training_step / prepare_data / get_dataloader /
# validation_step / _eval_end / validation_epoch_end / test_epoch_end /
# add_model_specific_args — left byte-identical and flagged rather than
# reconstructed, because the control flow is too intricate to restore safely.
class snake_case_ ( __A ):
    """Token-classification (NER/POS/chunking) fine-tuning module over BaseTransformer."""
    # Mode string consumed by the BaseTransformer constructor.
    __A : Optional[int] = "token-classification"
    # Resolve the task class from `tasks` by name, load its labels, and
    # initialise the underlying transformer with the label count.
    def __init__( self : int , lowercase_ : Any ) -> Optional[Any]:
        if type(lowercase_ ) == dict:
            lowercase__ : Tuple = Namespace(**lowercase_ )
        lowercase__ : List[Any] = import_module("tasks" )
        try:
            lowercase__ : Dict = getattr(lowercase_ , hparams.task_type )
            lowercase__ : TokenClassificationTask = token_classification_task_clazz()
        except AttributeError:
            raise ValueError(
                F'''Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '''
                F'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' )
        lowercase__ : List[Any] = self.token_classification_task.get_labels(hparams.labels )
        lowercase__ : Dict = CrossEntropyLoss().ignore_index
        super().__init__(lowercase_ , len(self.labels ) , self.mode )
    # Forward pass: delegate to the wrapped transformer model.
    def __UpperCamelCase ( self : Tuple , **lowercase_ : int ) -> Dict:
        return self.model(**lowercase_ )
    # Training step: build model inputs from the batch and return the loss.
    def __UpperCamelCase ( self : Optional[int] , lowercase_ : Any , lowercase_ : str ) -> Any:
        lowercase__ : Tuple = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            lowercase__ : List[str] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            ) # XLM and RoBERTa don"t use token_type_ids
        lowercase__ : str = self(**lowercase_ )
        lowercase__ : Any = outputs[0]
        # tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss}
    # Data preparation: featurise each split once and cache it on disk.
    def __UpperCamelCase ( self : Any ) -> Tuple:
        lowercase__ : str = self.hparams
        for mode in ["train", "dev", "test"]:
            lowercase__ : str = self._feature_file(lowercase_ )
            if os.path.exists(lowercase_ ) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s" , lowercase_ )
                lowercase__ : Union[str, Any] = torch.load(lowercase_ )
            else:
                logger.info("Creating features from dataset file at %s" , args.data_dir )
                lowercase__ : Tuple = self.token_classification_task.read_examples_from_file(args.data_dir , lowercase_ )
                lowercase__ : Optional[Any] = self.token_classification_task.convert_examples_to_features(
                    lowercase_ , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ["xlnet"] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ["xlnet"] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=lowercase_ , pad_on_left=bool(self.config.model_type in ["xlnet"] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
                logger.info("Saving features into cached file %s" , lowercase_ )
                torch.save(lowercase_ , lowercase_ )
    # Build a DataLoader over the cached feature tensors for one split.
    def __UpperCamelCase ( self : List[Any] , lowercase_ : int , lowercase_ : int , lowercase_ : bool = False ) -> DataLoader:
        lowercase__ : Optional[int] = self._feature_file(lowercase_ )
        logger.info("Loading features from cached file %s" , lowercase_ )
        lowercase__ : Tuple = torch.load(lowercase_ )
        lowercase__ : Dict = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
        lowercase__ : Union[str, Any] = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
        if features[0].token_type_ids is not None:
            lowercase__ : Optional[Any] = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
        else:
            lowercase__ : Union[str, Any] = torch.tensor([0 for f in features] , dtype=torch.long )
        # HACK(we will not use this anymore soon)
        lowercase__ : Optional[int] = torch.tensor([f.label_ids for f in features] , dtype=torch.long )
        return DataLoader(
            TensorDataset(lowercase_ , lowercase_ , lowercase_ , lowercase_ ) , batch_size=lowercase_ )
    # Validation step: run the model and return loss, logits, and gold labels.
    def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : List[str] , lowercase_ : Any ) -> List[str]:
        """Compute validation""" ""
        lowercase__ : Optional[Any] = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            lowercase__ : Union[str, Any] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            ) # XLM and RoBERTa don"t use token_type_ids
        lowercase__ : List[Any] = self(**lowercase_ )
        lowercase__ , lowercase__ : Any = outputs[:2]
        lowercase__ : Optional[Any] = logits.detach().cpu().numpy()
        lowercase__ : str = inputs["labels"].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
    # Aggregate step outputs into seqeval metrics (accuracy/precision/recall/F1).
    def __UpperCamelCase ( self : int , lowercase_ : Any ) -> Union[str, Any]:
        lowercase__ : Any = torch.stack([x["val_loss"] for x in outputs] ).mean()
        lowercase__ : List[str] = np.concatenate([x["pred"] for x in outputs] , axis=0 )
        lowercase__ : List[Any] = np.argmax(lowercase_ , axis=2 )
        lowercase__ : List[Any] = np.concatenate([x["target"] for x in outputs] , axis=0 )
        lowercase__ : Dict = dict(enumerate(self.labels ) )
        lowercase__ : List[Any] = [[] for _ in range(out_label_ids.shape[0] )]
        lowercase__ : List[str] = [[] for _ in range(out_label_ids.shape[0] )]
        for i in range(out_label_ids.shape[0] ):
            for j in range(out_label_ids.shape[1] ):
                if out_label_ids[i, j] != self.pad_token_label_id:
                    out_label_list[i].append(label_map[out_label_ids[i][j]] )
                    preds_list[i].append(label_map[preds[i][j]] )
        lowercase__ : Any = {
            "val_loss": val_loss_mean,
            "accuracy_score": accuracy_score(lowercase_ , lowercase_ ),
            "precision": precision_score(lowercase_ , lowercase_ ),
            "recall": recall_score(lowercase_ , lowercase_ ),
            "f1": fa_score(lowercase_ , lowercase_ ),
        }
        lowercase__ : Dict = dict(results.items() )
        lowercase__ : str = results
        return ret, preds_list, out_label_list
    # Validation epoch end: surface the aggregated log dict.
    def __UpperCamelCase ( self : Dict , lowercase_ : Union[str, Any] ) -> Tuple:
        # when stable
        lowercase__ , lowercase__ , lowercase__ : str = self._eval_end(lowercase_ )
        lowercase__ : Union[str, Any] = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    # Test epoch end: same aggregation, renamed loss key for the test phase.
    def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : List[str] ) -> Optional[int]:
        # updating to test_epoch_end instead of deprecated test_end
        lowercase__ , lowercase__ , lowercase__ : Optional[int] = self._eval_end(lowercase_ )
        # Converting to the dict required by pl
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
        # pytorch_lightning/trainer/logging.py#L139
        lowercase__ : List[Any] = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    # Extend the base argparse parser with NER-specific CLI options.
    @staticmethod
    def __UpperCamelCase ( lowercase_ : List[str] , lowercase_ : Optional[int] ) -> Union[str, Any]:
        # Add NER specific options
        BaseTransformer.add_model_specific_args(lowercase_ , lowercase_ )
        parser.add_argument(
            "--task_type" , default="NER" , type=lowercase_ , help="Task type to fine tune in training (e.g. NER, POS, etc)" )
        parser.add_argument(
            "--max_seq_length" , default=1_28 , type=lowercase_ , help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ) , )
        parser.add_argument(
            "--labels" , default="" , type=lowercase_ , help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used." , )
        parser.add_argument(
            "--gpus" , default=0 , type=lowercase_ , help="The number of GPUs allocated for this, it is by default 0 meaning none" , )
        parser.add_argument(
            "--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
        return parser
if __name__ == "__main__":
    # BUGFIX: every binding here was renamed to `UpperCamelCase` while later
    # statements still read the original names (`parser`, `args`, `model`,
    # `trainer`, `checkpoints` — all NameErrors), and the module's class is
    # `snake_case_`, not the undefined `NERTransformer`. Consistent names
    # are restored.
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = snake_case_.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
    model = snake_case_(args)
    trainer = generic_train(model, args)
    if args.do_predict:
        # See https://github.com/huggingface/transformers/issues/3159
        # pl use this default format to create a checkpoint:
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
        # /pytorch_lightning/callbacks/model_checkpoint.py#L322
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, '''checkpoint-epoch=*.ckpt'''), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        trainer.test(model)
| 87 | import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
UpperCamelCase = '''\
@inproceedings{snover-etal-2006-study,
title = "A Study of Translation Edit Rate with Targeted Human Annotation",
author = "Snover, Matthew and
Dorr, Bonnie and
Schwartz, Rich and
Micciulla, Linnea and
Makhoul, John",
booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",
month = aug # " 8-12",
year = "2006",
address = "Cambridge, Massachusetts, USA",
publisher = "Association for Machine Translation in the Americas",
url = "https://aclanthology.org/2006.amta-papers.25",
pages = "223--231",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
UpperCamelCase = '''\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
'''
UpperCamelCase = '''
Produces TER scores alongside the number of edits and reference length.
Args:
predictions (list of str): The system stream (a sequence of segments).
references (list of list of str): A list of one or more reference streams (each a sequence of segments).
normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
Only applies if `normalized = True`. Defaults to `False`.
case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
\'score\' (float): TER score (num_edits / sum_ref_lengths * 100)
\'num_edits\' (int): The cumulative number of edits
\'ref_length\' (float): The cumulative average reference length
Examples:
Example 1:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}
Example 2:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}
Example 3:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... normalized=True,
... case_sensitive=True)
>>> print(results)
{\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}
Example 4:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}
Example 5:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}
'''
# NOTE(review): the decorator reads `_DESCRIPTION` / `_KWARGS_DESCRIPTION` /
# `_CITATION`, but the module's docstring constants above were all mangled to
# `UpperCamelCase`, so these names are unbound (NameError at class creation).
# The second method also repeats the parameter name `lowercase_` six times
# (a SyntaxError) and reads names (`references`, `predictions`, `output`, ...)
# never bound in this view. Left byte-identical and flagged.
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class snake_case_ ( datasets.Metric ):
    """TER (Translation Edit Rate) metric backed by sacrebleu's TER implementation."""
    # Declare metric metadata/features; requires sacrebleu >= 1.4.12 for TER.
    def __UpperCamelCase ( self : Union[str, Any] ) -> Tuple:
        if version.parse(scb.__version__ ) < version.parse("1.4.12" ):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                "You can install it with `pip install \"sacrebleu>=1.4.12\"`." )
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , homepage="http://www.cs.umd.edu/~snover/tercom/" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    "predictions": datasets.Value("string" , id="sequence" ),
                    "references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ),
                } ) , codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"] , reference_urls=[
                "https://github.com/jhclark/tercom",
            ] , )
    # Compute TER: transpose references to sacrebleu's layout and score the corpus.
    def __UpperCamelCase ( self : Optional[Any] , lowercase_ : str , lowercase_ : Optional[int] , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : bool = False , ) -> Any:
        lowercase__ : Optional[int] = len(references[0] )
        if any(len(lowercase_ ) != references_per_prediction for refs in references ):
            raise ValueError("Sacrebleu requires the same number of references for each prediction" )
        lowercase__ : Union[str, Any] = [[refs[i] for refs in references] for i in range(lowercase_ )]
        lowercase__ : str = TER(
            normalized=lowercase_ , no_punct=lowercase_ , asian_support=lowercase_ , case_sensitive=lowercase_ , )
        lowercase__ : List[str] = sb_ter.corpus_score(lowercase_ , lowercase_ )
        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 87 | 1 |
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def lowercase_ ( tokenizer_name , data_dir , max_source_length=1024 , max_target_length=1024 , consider_target=False , **kwargs):
    """Compute per-example token lengths for the train/val splits and pickle them.

    Lengths are the number of non-pad tokens in `input_ids` (or, when
    `consider_target` is True, the max of source and target lengths), written
    to each dataset's `len_file` via `pickle_save`.

    BUGFIX: the previous signature repeated `_lowerCamelCase` for every
    parameter (a SyntaxError) and the body read names (`tok`, `ds`,
    `consider_target`, `max_lens`, `train_ds`, `val_ds`, ...) that were never
    bound; distinct names are restored.
    """
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = SeqaSeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        # Iterate the split in large batches; tqdm labels progress with the target file.
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = SeqaSeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)
if __name__ == "__main__":
    # BUGFIX: `save_len_file` is not defined in this module — the length-saving
    # function above is named `lowercase_`.
    fire.Fire(lowercase_)
def lowercase_ ( _lowerCamelCase : int) -> bool:
    """Return True when `_lowerCamelCase` is a perfect cube (k**3 for some integer k).

    BUGFIX: the previous body read the unbound names `n` and `val`
    (NameError), and even with those repaired the raw float cube root makes
    e.g. 27 fail (27 ** (1/3) == 3.0000000000000004); the root is rounded to
    the nearest integer before an exact integer comparison. Negative inputs
    are handled via their magnitude, since (-k)**3 == -(k**3).
    """
    if _lowerCamelCase < 0:
        return lowercase_(-_lowerCamelCase)
    lowercase__ : int = round(_lowerCamelCase ** (1 / 3))
    return lowercase__ ** 3 == _lowerCamelCase
if __name__ == "__main__":
    # BUGFIX: `perfect_cube` is not defined in this module — the predicate
    # above is named `lowercase_`.
    print(lowercase_(27))
    print(lowercase_(4))
| 87 | 1 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096''': '''https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json'''
),
}
# NOTE(review): this Longformer configuration is name-mangled: every `__init__`
# parameter is named `lowercase_` (duplicate argument names — a SyntaxError),
# and the body reads the original parameter names (`attention_window`,
# `vocab_size`, ...) which are never bound, while the assignment targets were
# all rewritten to the local `lowercase__` instead of `self.<attr>`. Left
# byte-identical and flagged; the parameter-to-attribute mapping is visible
# in the reads below but cannot be restored with certainty from this view.
class snake_case_ ( __A ):
    """Longformer model configuration (mangled)."""
    # `model_type` identifier consumed by the config registry.
    __A : List[str] = "longformer"
    def __init__( self : Any , lowercase_ : Union[List[int], int] = 5_12 , lowercase_ : int = 2 , lowercase_ : int = 1 , lowercase_ : int = 0 , lowercase_ : int = 2 , lowercase_ : int = 3_05_22 , lowercase_ : int = 7_68 , lowercase_ : int = 12 , lowercase_ : int = 12 , lowercase_ : int = 30_72 , lowercase_ : str = "gelu" , lowercase_ : float = 0.1 , lowercase_ : float = 0.1 , lowercase_ : int = 5_12 , lowercase_ : int = 2 , lowercase_ : float = 0.02 , lowercase_ : float = 1E-12 , lowercase_ : bool = False , **lowercase_ : Dict , ) -> str:
        super().__init__(pad_token_id=lowercase_ , **lowercase_ )
        lowercase__ : List[Any] = attention_window
        lowercase__ : Any = sep_token_id
        lowercase__ : Dict = bos_token_id
        lowercase__ : Optional[Any] = eos_token_id
        lowercase__ : str = vocab_size
        lowercase__ : Optional[Any] = hidden_size
        lowercase__ : Optional[int] = num_hidden_layers
        lowercase__ : List[str] = num_attention_heads
        lowercase__ : Tuple = hidden_act
        lowercase__ : int = intermediate_size
        lowercase__ : str = hidden_dropout_prob
        lowercase__ : Any = attention_probs_dropout_prob
        lowercase__ : Tuple = max_position_embeddings
        lowercase__ : List[Any] = type_vocab_size
        lowercase__ : Tuple = initializer_range
        lowercase__ : List[Any] = layer_norm_eps
        lowercase__ : Dict = onnx_export
# NOTE(review): ONNX export configuration for Longformer. `__init__` and
# `generate_dummy_inputs` repeat the parameter name `lowercase_` (SyntaxError);
# left byte-identical and flagged. Also note this class reuses the name
# `snake_case_`, shadowing the config class defined just above.
class snake_case_ ( __A ):
    """ONNX export config (mangled): axis maps, outputs, tolerance, opset, dummy inputs."""
    def __init__( self : Optional[int] , lowercase_ : "PretrainedConfig" , lowercase_ : str = "default" , lowercase_ : "List[PatchingSpec]" = None ) -> int:
        super().__init__(lowercase_ , lowercase_ , lowercase_ )
        lowercase__ : Any = True
    # Dynamic-axis map for the model inputs (batch/sequence, plus choice for
    # multiple-choice tasks); includes Longformer's global_attention_mask.
    @property
    def __UpperCamelCase ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            lowercase__ : Union[str, Any] = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            lowercase__ : str = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("global_attention_mask", dynamic_axis),
            ] )
    # Output axes: for the default task only the batch axis is dynamic.
    @property
    def __UpperCamelCase ( self : List[str] ) -> Mapping[str, Mapping[int, str]]:
        lowercase__ : Optional[int] = super().outputs
        if self.task == "default":
            lowercase__ : Optional[Any] = {0: "batch"}
        return outputs
    # Numeric tolerance used when validating the exported model.
    @property
    def __UpperCamelCase ( self : Optional[int] ) -> float:
        return 1E-4
    @property
    def __UpperCamelCase ( self : Union[str, Any] ) -> int:
        # needs to be >= 14 to support tril operator
        return max(super().default_onnx_opset , 14 )
    # Build dummy inputs and add a global attention mask (every second token global).
    def __UpperCamelCase ( self : List[str] , lowercase_ : "PreTrainedTokenizerBase" , lowercase_ : int = -1 , lowercase_ : int = -1 , lowercase_ : bool = False , lowercase_ : Optional[TensorType] = None , ) -> Mapping[str, Any]:
        lowercase__ : Tuple = super().generate_dummy_inputs(
            preprocessor=lowercase_ , batch_size=lowercase_ , seq_length=lowercase_ , is_pair=lowercase_ , framework=lowercase_ )
        import torch
        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        lowercase__ : Dict = torch.zeros_like(inputs["input_ids"] )
        # make every second token global
        lowercase__ : str = 1
        return inputs
| 87 | from PIL import Image
def lowercase_ ( img : "Image" , level : int):
    """Return a copy of `img` with its contrast adjusted by `level` (-255..255).

    Uses the standard linear contrast curve anchored at mid-grey (128):
    factor = 259*(level+255) / (255*(259-level)); level 0 is the identity.

    BUGFIX: the previous signature repeated `_lowerCamelCase` for both
    parameters (a SyntaxError) and the body read the unbound names `level`
    and `img`; distinct parameter names are restored. The `Image` annotation
    is quoted so the function is importable without PIL at definition time.
    """
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c : int) -> int:
        # Map each pixel value through the curve, pivoting around 128.
        return int(128 + factor * (c - 128))

    return img.point(contrast)
if __name__ == "__main__":
    # BUGFIX: the adjusted image was bound to `UpperCamelCase` but saved via
    # the unbound name `cont_img`, and `change_contrast` is undefined — the
    # adjustment function in this module is `lowercase_`.
    # Load image
    with Image.open('''image_data/lena.jpg''') as img:
        # Change contrast to 170
        UpperCamelCase = lowercase_(img, 170)
        UpperCamelCase.save('''image_data/lena_high_contrast.png''', format='''png''')
| 87 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import plumbing for the NLLB tokenizers: build the import-structure
# dict, then hand it to `_LazyModule` so the tokenizer modules are only
# imported on first attribute access.
# BUGFIX: the structure must be a dict mapping submodule -> exported names;
# the previous code rebound the name to bare lists and then referenced an
# undefined `_import_structure` in the `_LazyModule` call (NameError).
UpperCamelCase = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # sentencepiece is optional; the slow tokenizer is simply not exposed.
    pass
else:
    UpperCamelCase["tokenization_nllb"] = ["NllbTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # tokenizers is optional; the fast tokenizer is simply not exposed.
    pass
else:
    UpperCamelCase["tokenization_nllb_fast"] = ["NllbTokenizerFast"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb import NllbTokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb_fast import NllbTokenizerFast
else:
    import sys

    UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], UpperCamelCase, module_spec=__spec__)
| 87 | from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar("T")


class LRUCache(Generic[T]):
    """LRU cache of hashable keys: most recently used key kept at the left."""

    dq_store: deque[T]  # Cache store of keys, MRU first
    key_reference: set[T]  # References of the keys in cache (O(1) membership)
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache (class default)

    def __init__(self, n: int) -> None:
        """Create an empty cache holding at most ``n`` keys (falsy n -> unbounded)."""
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            self._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            self._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        """Record an access of ``x``, evicting the least recently used key if full."""
        if x not in self.key_reference:
            if len(self.dq_store) == self._MAX_CAPACITY:
                # Evict the rightmost (least recently used) key.
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)
        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        """Print cached keys, most recently used first."""
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    lru_cache = LRUCache(4)
    lru_cache.refer("A")
    lru_cache.refer(2)
    lru_cache.refer(3)
    lru_cache.refer("A")
    lru_cache.refer(4)
    lru_cache.refer(5)
    lru_cache.display()

    print(lru_cache)
    assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 87 | 1 |
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
# Distilled student checkpoint used by the warm-up and fine-tuning tests below.
MARIAN_MODEL = '''sshleifer/mar_enro_6_3_student'''
class TestMbartCc25Enro(TestCasePlus):
    """GPU end-to-end test of the seq2seq fine-tuning example (MBART -> Marian student, en-ro).

    NOTE(review): class/method/local identifiers reconstructed from preserved
    call structure — confirm against the upstream example test file.
    """

    def setUp(self):
        super().setUp()

        data_cached = cached_path(
            "https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz",
            extract_compressed_file=True,
        )
        self.data_dir = f"{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"

    @slow
    @require_torch_gpu
    def test_model_download(self):
        """Warm up the model cache so the training test below excludes download time."""
        MarianMTModel.from_pretrained(MARIAN_MODEL)

    @slow
    @require_torch_gpu
    def test_train_mbart_cc25_enro_script(self):
        """Run the finetune.py example end to end and sanity-check metrics + checkpoint."""
        env_vars_to_replace = {
            "$MAX_LEN": 64,
            "$BS": 64,
            "$GAS": 1,
            "$ENRO_DIR": self.data_dir,
            "facebook/mbart-large-cc25": MARIAN_MODEL,
            # "val_check_interval=0.25": "val_check_interval=1.0",
            "--learning_rate=3e-5": "--learning_rate 3e-4",
            "--num_train_epochs 6": "--num_train_epochs 1",
        }

        # Clean up bash script
        bash_script = (self.test_file_dir / "train_mbart_cc25_enro.sh").open().read().split("finetune.py")[1].strip()
        bash_script = bash_script.replace("\\\n", "").strip().replace("\"$@\"", "")
        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()

        # bash_script = bash_script.replace("--fp16 ", "")
        args = f'''
            --output_dir {output_dir}
            --tokenizer_name Helsinki-NLP/opus-mt-en-ro
            --sortish_sampler
            --do_predict
            --gpus 1
            --freeze_encoder
            --n_train 40000
            --n_val 500
            --n_test 500
            --fp16_opt_level O1
            --num_sanity_val_steps 0
            --eval_beams 2
        '''.split()
        # XXX: args.gpus > 1 : handle multi_gpu in the future
        testargs = ["finetune.py"] + bash_script.split() + args
        with patch.object(sys, "argv", testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            model = main(args)

        # Check metrics
        metrics = load_json(model.metrics_save_path)
        first_step_stats = metrics["val"][0]
        last_step_stats = metrics["val"][-1]
        self.assertEqual(len(metrics["val"]), (args.max_epochs / args.val_check_interval))
        assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"], float)
        self.assertGreater(last_step_stats["val_avg_gen_time"], 0.01)
        # model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
        self.assertLessEqual(last_step_stats["val_avg_gen_time"], 1.0)

        # test learning requirements:
        # 1. BLEU improves over the course of training by more than 2 pts
        self.assertGreater(last_step_stats["val_avg_bleu"] - first_step_stats["val_avg_bleu"], 2)
        # 2. BLEU finishes above 17
        self.assertGreater(last_step_stats["val_avg_bleu"], 17)
        # 3. test BLEU and val BLEU within ~1.1 pt.
        self.assertLess(abs(metrics["val"][-1]["val_avg_bleu"] - metrics["test"][-1]["test_avg_bleu"]), 1.1)

        # check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir)
        ckpt_path = [x for x in contents if x.endswith(".ckpt")][0]
        full_path = os.path.join(args.output_dir, ckpt_path)
        ckpt = torch.load(full_path, map_location="cpu")
        expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32

        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
            assert "test_generations.txt" in contents
            assert "test_results.txt" in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics["test"]) == 1
class TestDistilMarianNoTeacher(TestCasePlus):
    """GPU end-to-end test of the no-teacher Marian distillation example.

    NOTE(review): identifiers reconstructed from preserved call structure —
    confirm against the upstream example test file.
    """

    @timeout_decorator.timeout(600)
    @slow
    @require_torch_gpu
    def test_opus_mt_distill_script(self):
        data_dir = f"{self.test_file_dir_str}/test_data/wmt_en_ro"
        env_vars_to_replace = {
            "--fp16_opt_level=O1": "",
            "$MAX_LEN": 128,
            "$BS": 16,
            "$GAS": 1,
            "$ENRO_DIR": data_dir,
            "$m": "sshleifer/student_marian_en_ro_6_1",
            "val_check_interval=0.25": "val_check_interval=1.0",
        }

        # Clean up bash script
        bash_script = (
            (self.test_file_dir / "distil_marian_no_teacher.sh").open().read().split("distillation.py")[1].strip()
        )
        bash_script = bash_script.replace("\\\n", "").strip().replace("\"$@\"", "")
        bash_script = bash_script.replace("--fp16 ", " ")

        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()
        bash_script = bash_script.replace("--fp16", "")
        epochs = 6
        testargs = (
            ["distillation.py"]
            + bash_script.split()
            + [
                f"--output_dir={output_dir}",
                "--gpus=1",
                "--learning_rate=1e-3",
                f"--num_train_epochs={epochs}",
                "--warmup_steps=10",
                "--val_check_interval=1.0",
                "--do_predict",
            ]
        )
        with patch.object(sys, "argv", testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationDistiller.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            # assert args.gpus == gpus THIS BREAKS for multi_gpu
            model = distill_main(args)

        # Check metrics
        metrics = load_json(model.metrics_save_path)
        first_step_stats = metrics["val"][0]
        last_step_stats = metrics["val"][-1]
        assert len(metrics["val"]) >= (args.max_epochs / args.val_check_interval)  # +1 accounts for val_sanity_check
        assert last_step_stats["val_avg_gen_time"] >= 0.01
        assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"]  # model learned nothing
        assert 1.0 >= last_step_stats["val_avg_gen_time"]  # model hanging on generate. Maybe bad config was saved.
        assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"], float)

        # check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir)
        ckpt_path = [x for x in contents if x.endswith(".ckpt")][0]
        full_path = os.path.join(args.output_dir, ckpt_path)
        ckpt = torch.load(full_path, map_location="cpu")
        expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32

        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
            assert "test_generations.txt" in contents
            assert "test_results.txt" in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics["test"]) == 1
| 87 | from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-level logger for this configuration module.
UpperCamelCase = logging.get_logger(__name__)
# NOTE(review): `UpperCamelCase` is immediately rebound below, so the logger
# above is lost. These look like two mangled distinct names — presumably
# `logger` and `CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP`; confirm upstream.
# Map of pretrained ConvBERT checkpoint names to their hosted config.json URLs.
UpperCamelCase = {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json''',
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class ConvBertConfig(PretrainedConfig):
    """Configuration class storing the hyper-parameters of a ConvBERT model.

    NOTE(review): the original signature used one duplicated parameter name
    for every argument (a SyntaxError); parameter names were reconstructed
    from the assignment order and default values — confirm upstream.
    """

    model_type = "convbert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        embedding_size=768,
        head_ratio=2,
        conv_kernel_size=9,
        num_groups=1,
        classifier_dropout=None,
        **kwargs,
    ):
        # Special-token ids and any extra kwargs are handled by the base config.
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout
class ConvBertOnnxConfig(OnnxConfig):
    """ONNX export configuration for ConvBERT: declares the dynamic input axes."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Multiple-choice inputs carry an extra "choice" axis.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
| 87 | 1 |
def sum_digits(num: int) -> int:
    """Return the sum of the decimal digits of non-negative ``num`` (0 -> 0)."""
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution(max_n: int = 100) -> int:
    """Project Euler 65: digit sum of the numerator of the ``max_n``-th
    convergent of the continued fraction of e.

    The continued fraction of e is [2; 1, 2, 1, 1, 4, 1, 1, 6, ...]: every
    third coefficient is 2k, the rest are 1. Numerators follow the standard
    recurrence h_n = a_n * h_{n-1} + h_{n-2}.
    """
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2, max_n + 1):
        temp = pre_numerator
        # a-coefficient: 2k for every third term, otherwise 1.
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator)


if __name__ == "__main__":
    print(f"{solution() = }")
| 87 | import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    """Convert a TensorFlow BERT checkpoint to a PyTorch state dict on disk.

    Builds a ``BertForPreTraining`` from ``bert_config_file``, loads the TF
    weights from ``tf_checkpoint_path``, and saves the state dict to
    ``pytorch_dump_path``.
    """
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f'''Building PyTorch model from configuration: {config}''')
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f'''Save PyTorch model to {pytorch_dump_path}''')
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
    )
    parser.add_argument(
        '''--bert_config_file''',
        default=None,
        type=str,
        required=True,
        help=(
            '''The config json file corresponding to the pre-trained BERT model. \n'''
            '''This specifies the model architecture.'''
        ),
    )
    parser.add_argument(
        '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 87 | 1 |
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config(config_path, display=False):
    """Load an OmegaConf YAML config; optionally pretty-print it."""
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config


def load_vqgan(device, conf_path=None, ckpt_path=None):
    """Build a VQGAN from a config/checkpoint pair and move it to ``device``."""
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        # Lightning checkpoints nest the weights under "state_dict".
        sd = sd["state_dict"]
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd
    return model


def reconstruct_with_vqgan(x, model):
    """Encode ``x`` to the VQGAN latent space and decode it back."""
    z, _, _ = model.encode(x)
    print(f'''VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}''')
    xrec = model.decode(z)
    return xrec


def get_obj_from_str(string, reload=False):
    """Resolve a dotted path like ``"pkg.mod.Cls"`` to the named object."""
    module_name, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module_name)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module_name, package=None), cls)


def instantiate_from_config(config):
    """Instantiate ``config["target"]`` with ``config["params"]`` as kwargs."""
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))


def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    """Build a model from config, optionally load weights / move to GPU / eval."""
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}


def load_model(config, ckpt, gpu, eval_mode):
    """Load a (model, global_step) pair from a Lightning checkpoint path."""
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f'''loaded model from global step {global_step}.''')
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
| 87 | import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env(key, default=False):
    """Read boolean env flag ``key``; return ``default`` when it is unset.

    Raises ValueError when the variable is set to something strtobool
    cannot interpret as yes/no.
    """
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f'''If set, {key} must be yes or no.''')
    return _value
# Run-mode flags, toggled from the environment (e.g. RUN_SLOW=1 pytest ...).
# Names are anchored by the slow/local/packaged/remote decorators below.
_run_slow_tests = parse_flag_from_env('''RUN_SLOW''', default=False)
_run_remote_tests = parse_flag_from_env('''RUN_REMOTE''', default=False)
_run_local_tests = parse_flag_from_env('''RUN_LOCAL''', default=True)
_run_packaged_tests = parse_flag_from_env('''RUN_PACKAGED''', default=True)

# Pytest skip markers for optional dependencies.
# NOTE(review): marker names reconstructed by convention — confirm upstream.
# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='''test requires lz4''')
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='''test requires py7zr''')
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='''test requires zstandard''')

# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec('''soundfile''') is None or version.parse(importlib_metadata.version('''soundfile''')) < version.parse('''0.12.0'''),
    reason='''test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ''',
)

# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('''0.3.2'''),
    reason='''test requires apache-beam and a compatible dill version''',
)

# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse('''0.3.2'''),
    reason='''test requires dill>0.3.2 for cloudpickle compatibility''',
)

# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == '''win32''',
    reason='''test should not be run on Windows''',
)
def lowercase_ ( _lowerCamelCase : int):
try:
import faiss # noqa
except ImportError:
lowercase__ : Optional[Any] = unittest.skip("test requires faiss")(_lowerCamelCase)
return test_case
def lowercase_ ( _lowerCamelCase : int):
try:
import regex # noqa
except ImportError:
lowercase__ : List[Any] = unittest.skip("test requires regex")(_lowerCamelCase)
return test_case
def lowercase_ ( _lowerCamelCase : int):
try:
import elasticsearch # noqa
except ImportError:
lowercase__ : Optional[int] = unittest.skip("test requires elasticsearch")(_lowerCamelCase)
return test_case
def lowercase_ ( _lowerCamelCase : Union[str, Any]):
try:
import sqlalchemy # noqa
except ImportError:
lowercase__ : Optional[int] = unittest.skip("test requires sqlalchemy")(_lowerCamelCase)
return test_case
def lowercase_ ( _lowerCamelCase : int):
if not config.TORCH_AVAILABLE:
lowercase__ : Tuple = unittest.skip("test requires PyTorch")(_lowerCamelCase)
return test_case
def lowercase_ ( _lowerCamelCase : Tuple):
if not config.TF_AVAILABLE:
lowercase__ : Any = unittest.skip("test requires TensorFlow")(_lowerCamelCase)
return test_case
def lowercase_ ( _lowerCamelCase : Dict):
if not config.JAX_AVAILABLE:
lowercase__ : List[str] = unittest.skip("test requires JAX")(_lowerCamelCase)
return test_case
def lowercase_ ( _lowerCamelCase : int):
if not config.PIL_AVAILABLE:
lowercase__ : Dict = unittest.skip("test requires Pillow")(_lowerCamelCase)
return test_case
def lowercase_ ( _lowerCamelCase : Tuple):
try:
import transformers # noqa F401
except ImportError:
return unittest.skip("test requires transformers")(_lowerCamelCase)
else:
return test_case
def lowercase_ ( _lowerCamelCase : Optional[Any]):
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip("test requires tiktoken")(_lowerCamelCase)
else:
return test_case
def lowercase_ ( _lowerCamelCase : Dict):
try:
import spacy # noqa F401
except ImportError:
return unittest.skip("test requires spacy")(_lowerCamelCase)
else:
return test_case
def lowercase_ ( _lowerCamelCase : Optional[int]):
def _require_spacy_model(_lowerCamelCase : Optional[int]):
try:
import spacy # noqa F401
spacy.load(_lowerCamelCase)
except ImportError:
return unittest.skip("test requires spacy")(_lowerCamelCase)
except OSError:
return unittest.skip("test requires spacy model '{}'".format(_lowerCamelCase))(_lowerCamelCase)
else:
return test_case
return _require_spacy_model
def lowercase_ ( _lowerCamelCase : Dict):
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip("test requires pyspark")(_lowerCamelCase)
else:
return test_case
def lowercase_ ( _lowerCamelCase : List[str]):
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip("test requires joblibspark")(_lowerCamelCase)
else:
return test_case
def lowercase_ ( _lowerCamelCase : Dict):
if not _run_slow_tests or _run_slow_tests == 0:
lowercase__ : Tuple = unittest.skip("test is slow")(_lowerCamelCase)
return test_case
def lowercase_ ( _lowerCamelCase : int):
if not _run_local_tests or _run_local_tests == 0:
lowercase__ : str = unittest.skip("test is local")(_lowerCamelCase)
return test_case
def lowercase_ ( _lowerCamelCase : Optional[int]):
if not _run_packaged_tests or _run_packaged_tests == 0:
lowercase__ : List[Any] = unittest.skip("test is packaged")(_lowerCamelCase)
return test_case
def lowercase_ ( _lowerCamelCase : Tuple):
if not _run_remote_tests or _run_remote_tests == 0:
lowercase__ : Union[str, Any] = unittest.skip("test requires remote")(_lowerCamelCase)
return test_case
def lowercase_ ( *_lowerCamelCase : str):
def decorate(cls : str):
for name, fn in cls.__dict__.items():
if callable(_lowerCamelCase) and name.startswith("test"):
for decorator in decorators:
lowercase__ : Optional[int] = decorator(_lowerCamelCase)
setattr(cls , _lowerCamelCase , _lowerCamelCase)
return cls
return decorate
class RequestWouldHangIndefinitelyError(Exception):
    """Raised by offline simulation when a request has no timeout set."""

    pass


class OfflineSimulationMode(Enum):
    """Ways the offline() context manager can simulate a missing network."""

    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    """Simulate an offline environment inside the ``with`` block.

    Depending on ``mode``, requests either fail immediately, time out
    against an unroutable address, or datasets is flipped to offline mode.
    """
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                f'''Tried a call to {url} in offline mode with no timeout set. Please set a timeout.'''
            )
        kwargs["timeout"] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f'''OfflineMock[{url}]'''),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")
@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    """Chdir into a fresh temporary directory for the ``with`` block.

    The previous working directory is always restored, even on error;
    extra args/kwargs are forwarded to TemporaryDirectory.
    """
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)
@contextmanager
def assert_arrow_memory_increases():
    """Assert that Arrow allocations grew during the ``with`` block."""
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def assert_arrow_memory_doesnt_increase():
    """Assert that Arrow allocations did not grow during the ``with`` block."""
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def is_rng_equal(rng1, rng2):
    """True when two NumPy bit generators are in the same state.

    Compares 10 draws from deep copies so neither generator is advanced.
    """
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()
def xfail_if_500_502(func):
    """Turn HTTP 500/502 errors raised by *func* into pytest xfails."""
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            # Flaky server-side failures should not fail the suite.
            if str(err).startswith("500") or str(err).startswith("502"):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)
class snake_case_ :
def __init__( self : int , lowercase_ : Union[str, Any] , lowercase_ : Dict , lowercase_ : List[str] ) -> List[str]:
lowercase__ : Tuple = returncode
lowercase__ : int = stdout
lowercase__ : Union[str, Any] = stderr
async def lowercase_ ( _lowerCamelCase : List[str] , _lowerCamelCase : Dict):
while True:
lowercase__ : Optional[int] = await stream.readline()
if line:
callback(_lowerCamelCase)
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    """Run ``cmd``, teeing its stdout/stderr live while also capturing them."""
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    # create_task: asyncio.wait() no longer accepts bare coroutines on 3.11+.
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda line: tee(line, out, sys.stdout, label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr, lambda line: tee(line, err, sys.stderr, label="stderr:"))),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    """Run ``cmd`` to completion via the async streamer; raise on failure.

    Raises RuntimeError when the command exits non-zero or produces no
    output at all (the latter guards remote-side tests).
    """
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n'''
            f'''The combined stderr from workers follows:\n{stderr}'''
        )

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f'''\'{cmd_str}\' produced no output.''')

    return result
def pytest_xdist_worker_id():
    """Return this pytest-xdist worker's numeric id ("gw3" -> 3, default 0)."""
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    worker = re.sub(R"^gw", "", worker, 0, re.M)
    return int(worker)


def get_torch_dist_unique_port():
    """Return a torch.distributed port unique to this xdist worker."""
    port = 29500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
| 87 | 1 |
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def lowercase_ ( _lowerCamelCase : Union[dict, list, tuple, torch.Tensor]):
lowercase__ : Optional[Any] = []
if isinstance(_lowerCamelCase , _lowerCamelCase):
for v in tree.values():
shapes.extend(_fetch_dims(_lowerCamelCase))
elif isinstance(_lowerCamelCase , (list, tuple)):
for t in tree:
shapes.extend(_fetch_dims(_lowerCamelCase))
elif isinstance(_lowerCamelCase , torch.Tensor):
shapes.append(tree.shape)
else:
raise ValueError("Not supported")
return shapes
@torch.jit.ignore
def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : Tuple[int, ...]):
lowercase__ : Union[str, Any] = []
for d in reversed(_lowerCamelCase):
idx.append(flat_idx % d)
lowercase__ : Union[str, Any] = flat_idx // d
return tuple(reversed(_lowerCamelCase))
@torch.jit.ignore
def lowercase_(
    start: Sequence[int],
    end: Sequence[int],
    dims: Sequence[int],
    start_edges: Optional[Sequence[bool]] = None,
    end_edges: Optional[Sequence[bool]] = None,
):
    """Return a minimal list of slice tuples that exactly cover the index
    range [start, end] (both inclusive) of a tensor with batch shape ``dims``.

    Fixes vs. the previous version: the five parameters all shared the name
    ``_lowerCamelCase`` (a SyntaxError), and the recursive calls referenced an
    undefined name; recursion now targets this function itself.
    """
    # start_edges and end_edges both indicate whether, starting from any given
    # dimension, the start/end index is at the top/bottom edge of the
    # corresponding tensor, modeled as a tree
    def reduce_edge_list(l: List[bool]) -> None:
        # An index only counts as an "edge" if every inner dimension is also
        # at its edge; propagate that constraint from innermost outward.
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)

    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]

    slices: List[Tuple[slice, ...]] = []
    path_list: List[slice] = []

    # Dimensions common to start and end can be selected directly
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break

    path: Tuple[slice, ...] = tuple(path_list)
    divergence_idx = len(path_list)

    # start == end, and we're done
    if divergence_idx == len(dims):
        return [path]

    def upper() -> Tuple[Tuple[slice, ...], ...]:
        # Everything below `start` in the subtrees strictly inside the start
        # boundary of the divergent dimension.
        assert start_edges is not None
        assert end_edges is not None
        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi, sdi + 1),) + s
            for s in lowercase_(
                start[divergence_idx + 1 :],
                [d - 1 for d in dims[divergence_idx + 1 :]],
                dims[divergence_idx + 1 :],
                start_edges=start_edges[divergence_idx + 1 :],
                end_edges=[True for _ in end_edges[divergence_idx + 1 :]],
            )
        )

    def lower() -> Tuple[Tuple[slice, ...], ...]:
        # Mirror of upper(): covers the ragged end boundary.
        assert start_edges is not None
        assert end_edges is not None
        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi, edi + 1),) + s
            for s in lowercase_(
                [0 for _ in start[divergence_idx + 1 :]],
                end[divergence_idx + 1 :],
                dims[divergence_idx + 1 :],
                start_edges=[True for _ in start_edges[divergence_idx + 1 :]],
                end_edges=end_edges[divergence_idx + 1 :],
            )
        )

    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx] + 1),))
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx]),))
        slices.extend(lower())
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper())
        slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] + 1),))
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper())
        middle_ground = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx]),))
        slices.extend(lower())

    return slices
@torch.jit.ignore
def lowercase_(t: torch.Tensor, flat_start: int, flat_end: int, no_batch_dims: int) -> torch.Tensor:
    """Equivalent to ``t.reshape((-1,) + t.shape[no_batch_dims:])[flat_start:flat_end]``
    but built from per-dimension slices, so no flattened copy of ``t`` is made.

    Fixes vs. the previous version: all four parameters were named
    ``_lowerCamelCase`` (a SyntaxError) and the helper-call arguments were
    scrambled; they are restored from the body's usage.

    NOTE(review): ``_flat_idx_to_idx`` and ``_get_minimal_slice_set`` are
    assumed to be module-level helpers defined elsewhere in this file —
    confirm they exist under those names.
    """
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))
    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(
        start_idx,
        end_idx,
        batch_dims,
    )
    sliced_tensors = [t[s] for s in slices]
    # Each slice already covers a contiguous run of flat indices; flatten the
    # batch dims of each piece and concatenate in order.
    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def lowercase_ ( _lowerCamelCase : Callable , _lowerCamelCase : Dict[str, Any] , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : bool = False , _lowerCamelCase : Any = None , _lowerCamelCase : bool = False , ):
    # Chunked layer application: broadcasts/flattens the leading batch dims of
    # every tensor in the input tree, feeds the layer at most `chunk_size`
    # rows at a time, and writes each chunk's result into a pre-allocated
    # output tree, restoring the original batch shape at the end.
    # NOTE(review): all seven parameters share the name `_lowerCamelCase`
    # (a SyntaxError) and the body refers to names (layer, chunk_size,
    # no_batch_dims, low_mem, _out, _add_into_out, tensor_tree_map,
    # _fetch_dims, _chunk_slice, partial) that this signature does not bind —
    # presumably the intended signature is
    # (layer, inputs, chunk_size, no_batch_dims, low_mem=False, _out=None,
    #  _add_into_out=False); confirm against the upstream implementation.
    if not (len(_lowerCamelCase) > 0):
        raise ValueError("Must provide at least one input")
    # Leading (batch) dims of every tensor in the input tree.
    lowercase__ : Tuple = [shape[:no_batch_dims] for shape in _fetch_dims(_lowerCamelCase)]
    # Broadcast batch shape: per-dimension max across all inputs.
    lowercase__ : Optional[Any] = tuple([max(_lowerCamelCase) for s in zip(*_lowerCamelCase)])
    def _prep_inputs(_lowerCamelCase : torch.Tensor) -> torch.Tensor:
        # Broadcast each tensor to the common batch shape and (unless in
        # low-memory mode) flatten its batch dims into a single leading dim.
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                lowercase__ : List[str] = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            lowercase__ : Union[str, Any] = t.reshape(-1 , *t.shape[no_batch_dims:])
        else:
            lowercase__ : int = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t
    lowercase__ : Dict[str, Any] = tensor_tree_map(_prep_inputs , _lowerCamelCase)
    lowercase__ : Optional[int] = None
    if _out is not None:
        # A caller-provided output buffer is flattened the same way.
        lowercase__ : Union[str, Any] = tensor_tree_map(lambda _lowerCamelCase: t.view([-1] + list(t.shape[no_batch_dims:])) , _out)
    # Total number of rows once the batch dims are flattened.
    lowercase__ : List[str] = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d
    # Ceil-divide: number of chunks needed to cover every row.
    lowercase__ : Optional[Any] = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
    def _select_chunk(_lowerCamelCase : torch.Tensor) -> torch.Tensor:
        # Size-1 leading dims are broadcast rather than sliced.
        return t[i : i + chunk_size] if t.shape[0] != 1 else t
    lowercase__ : Optional[int] = 0
    lowercase__ : Dict = prepped_outputs
    for _ in range(_lowerCamelCase):
        # Chunk the input
        if not low_mem:
            lowercase__ : int = _select_chunk
        else:
            # Low-memory path: slice the un-flattened tensors directly.
            lowercase__ : Any = partial(
                _chunk_slice , flat_start=_lowerCamelCase , flat_end=min(_lowerCamelCase , i + chunk_size) , no_batch_dims=len(_lowerCamelCase) , )
        lowercase__ : Dict[str, Any] = tensor_tree_map(_lowerCamelCase , _lowerCamelCase)
        # Run the layer on the chunk
        lowercase__ : Optional[int] = layer(**_lowerCamelCase)
        # Allocate space for the output
        if out is None:
            lowercase__ : Optional[int] = tensor_tree_map(lambda _lowerCamelCase: t.new_zeros((flat_batch_dim,) + t.shape[1:]) , _lowerCamelCase)
        # Put the chunk in its pre-allocated space
        if isinstance(_lowerCamelCase , _lowerCamelCase):
            # Dict outputs: copy (or accumulate) each leaf slice recursively.
            def assign(_lowerCamelCase : dict , _lowerCamelCase : dict) -> None:
                for k, v in da.items():
                    if isinstance(_lowerCamelCase , _lowerCamelCase):
                        assign(_lowerCamelCase , da[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += da[k]
                        else:
                            lowercase__ : Tuple = da[k]
            assign(_lowerCamelCase , _lowerCamelCase)
        elif isinstance(_lowerCamelCase , _lowerCamelCase):
            # Tuple/list outputs: write each element's slice.
            # NOTE(review): `for xa, xa in zip(...)` binds both elements to the
            # same name — presumably two distinct names were intended; confirm.
            for xa, xa in zip(_lowerCamelCase , _lowerCamelCase):
                if _add_into_out:
                    xa[i : i + chunk_size] += xa
                else:
                    lowercase__ : str = xa
        elif isinstance(_lowerCamelCase , torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                lowercase__ : Dict = output_chunk
        else:
            raise ValueError("Not supported")
        i += chunk_size
    # Restore the original (un-flattened) batch shape on every output tensor.
    lowercase__ : List[str] = tensor_tree_map(lambda _lowerCamelCase: t.view(orig_batch_dims + t.shape[1:]) , _lowerCamelCase)
    return out
class snake_case_:
    """Tunes and caches the largest usable chunk size for a chunked layer call.

    The tuner binary-searches the largest power-of-two chunk size (bounded by
    ``max_chunk_size``) for which a representative call does not raise
    ``RuntimeError`` (e.g. CUDA OOM), and re-tunes only when the argument
    shapes change.

    Fixes vs. the previous version: all three methods shared the name
    ``__UpperCamelCase`` (so two were shadowed and the internal calls to
    ``self._compare_arg_caches`` / ``self._determine_favorable_chunk_size``
    failed), ``_compare_arg_caches`` had two parameters with the same name
    (a SyntaxError) and a zip over duplicate loop targets, and the sort
    lambdas referenced an unbound name.
    """

    def __init__(self, lowercase_: int = 512) -> None:
        # Upper bound on any chunk size that will be attempted.
        self.max_chunk_size = lowercase_
        # Last tuned chunk size; None until the first tuning run.
        self.cached_chunk_size: Optional[int] = None
        # Shape signature of the args used for the last tuning run.
        self.cached_arg_data: Optional[tuple] = None

    def _determine_favorable_chunk_size(self, fn: Callable, args: tuple, min_chunk_size: int) -> int:
        """Binary-search the largest viable chunk size in
        [min_chunk_size, max_chunk_size] for ``fn(*args, chunk_size=...)``."""
        logging.info("Tuning chunk size...")
        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size
        # Candidate sizes: powers of two up to max_chunk_size, above the floor.
        candidates: List[int] = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        # Nudge the top candidate past max_chunk_size so the search can
        # distinguish "max works" from "max is the boundary".
        candidates[-1] += 4

        def test_chunk_size(chunk_size: int) -> bool:
            # A candidate is viable iff the representative call completes.
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False

        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2
        return candidates[min_viable_chunk_size_index]

    def _compare_arg_caches(self, ac1: Iterable, ac2: Iterable) -> bool:
        """Recursively compare two argument-shape caches for equality."""
        consistent = True
        for a1, a2 in zip(ac1, ac2):
            assert type(a1) == type(a2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                # Compare dict values in a deterministic (key-sorted) order.
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2
        return consistent

    def tune_chunk_size(self, representative_fn: Callable, args: tuple, min_chunk_size: int) -> int:
        """Return a chunk size for ``args``, re-tuning only when their
        shapes differ from the cached tuning run."""
        consistent = True
        # Replace tensors with their shapes so the cache is cheap to compare.
        # NOTE(review): `tree_map` is assumed to be imported elsewhere in this
        # file — confirm.
        arg_data: tuple = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # No previous run: force a tuning pass.
            consistent = False
        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn, args, min_chunk_size
            )
            self.cached_arg_data = arg_data
        assert self.cached_chunk_size is not None
        return self.cached_chunk_size
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def lowercase_(idx: int):
    """Return (HF key, original key) rename pairs for the patch-embedding
    layer of stage ``idx`` of a CvT checkpoint.

    Note: the previous body interpolated ``idx`` in its f-strings while the
    parameter had a different name, raising NameError at call time; the
    parameter is renamed to match.
    """
    embed = []
    # Same four parameters, different naming schemes on each side.
    for hf_name, orig_name in (
        ("projection.weight", "proj.weight"),
        ("projection.bias", "proj.bias"),
        ("normalization.weight", "norm.weight"),
        ("normalization.bias", "norm.bias"),
    ):
        embed.append(
            (
                f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.{hf_name}",
                f"stage{idx}.patch_embed.{orig_name}",
            )
        )
    return embed
def lowercase_(idx: int, cnt: int):
    """Return (HF key, original key) rename pairs for attention block ``cnt``
    of stage ``idx`` of a CvT checkpoint.

    Covers the three convolutional projections (query/key/value) with their
    batch-norm statistics, the three linear projections, the attention output
    projection, the MLP, and both layernorms — in the same order as the
    previous hand-unrolled version.

    Note: the previous signature named both parameters ``_lowerCamelCase``
    (a SyntaxError); they are renamed to match the f-strings in the body.
    """
    attention_weights = []
    hf_prefix = f"cvt.encoder.stages.{idx}.layers.{cnt}"
    orig_prefix = f"stage{idx}.blocks.{cnt}"

    # Convolutional projections: conv weight + full batch-norm state, for
    # query, key and value in that order.
    for proj, short in (("query", "q"), ("key", "k"), ("value", "v")):
        for hf_suffix, orig_suffix in (
            ("convolution.weight", "conv.weight"),
            ("normalization.weight", "bn.weight"),
            ("normalization.bias", "bn.bias"),
            ("normalization.running_mean", "bn.running_mean"),
            ("normalization.running_var", "bn.running_var"),
            ("normalization.num_batches_tracked", "bn.num_batches_tracked"),
        ):
            attention_weights.append(
                (
                    f"{hf_prefix}.attention.attention.convolution_projection_{proj}"
                    f".convolution_projection.{hf_suffix}",
                    f"{orig_prefix}.attn.conv_proj_{short}.{orig_suffix}",
                )
            )

    # Linear q/k/v projections (weight + bias each).
    for proj, short in (("query", "q"), ("key", "k"), ("value", "v")):
        for param in ("weight", "bias"):
            attention_weights.append(
                (
                    f"{hf_prefix}.attention.attention.projection_{proj}.{param}",
                    f"{orig_prefix}.attn.proj_{short}.{param}",
                )
            )

    # Attention output projection, MLP, and the two layernorms.
    for hf_mod, orig_mod in (
        ("attention.output.dense", "attn.proj"),
        ("intermediate.dense", "mlp.fc1"),
        ("output.dense", "mlp.fc2"),
        ("layernorm_before", "norm1"),
        ("layernorm_after", "norm2"),
    ):
        for param in ("weight", "bias"):
            attention_weights.append(
                (f"{hf_prefix}.{hf_mod}.{param}", f"{orig_prefix}.{orig_mod}.{param}")
            )

    return attention_weights
def lowercase_(idx: int):
    """Return the rename pair for stage ``idx``'s cls token (the original
    checkpoint only stores it under ``stage2``).

    Note: the previous body used ``idx`` in its f-string while the parameter
    had a different name, raising NameError at call time; renamed to match.
    """
    token = []
    token.append((f"cvt.encoder.stages.{idx}.cls_token", "stage2.cls_token"))
    return token
def lowercase_():
    """Return (HF key, original key) rename pairs for the final layernorm and
    the classification head of a CvT checkpoint."""
    return [
        ("layernorm.weight", "norm.weight"),
        ("layernorm.bias", "norm.bias"),
        ("classifier.weight", "head.weight"),
        ("classifier.bias", "head.bias"),
    ]
def lowercase_(cvt_model, image_size, cvt_file_name, pytorch_dump_folder_path):
    """Convert an original CvT checkpoint to the HuggingFace format and save
    the converted model and image processor to ``pytorch_dump_folder_path``.

    Args:
        cvt_model: Model name (e.g. "cvt-13", "cvt-21", "cvt-w24"); the depth
            configuration is derived from characters 4:6 of the final path
            component.
        image_size: Input image size for the image processor.
        cvt_file_name: Path to the original ``.pth`` checkpoint.
        pytorch_dump_folder_path: Output directory.

    Fixes vs. the previous version: all four parameters shared one name
    (a SyntaxError), the id2label comprehension cast the wrong variable, and
    the converted-weights dict was never actually populated with keys.

    NOTE(review): ``cls_token``, ``embeddings``, ``attention`` and ``final``
    are assumed to be the module-level rename-pair helpers defined above —
    confirm their names in this module.
    """
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1000
    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    # JSON keys are strings; the config expects integer class ids.
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)

    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]

    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))

    huggingface_weights = OrderedDict()
    list_of_state_dict = []
    # Collect the (HF key, original key) rename pairs for every stage/block.
    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)
    list_of_state_dict = list_of_state_dict + final()

    for gg in list_of_state_dict:
        print(gg)
    # Re-key every original weight under its HuggingFace name.
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]

    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    # Fix vs. the previous version: the parser and parsed args were assigned
    # to one reused name while the code below referenced the (then undefined)
    # names `parser` and `args`, raising NameError.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--cvt_model",
        default="cvt-w24",
        type=str,
        help="Name of the cvt model you'd like to convert.",
    )
    parser.add_argument(
        "--image_size",
        default=384,
        type=int,
        help="Input Image Size",
    )
    parser.add_argument(
        "--cvt_file_name",
        default=r"cvtmodels\CvT-w24-384x384-IN-22k.pth",
        type=str,
        help="Input Image Size",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    args = parser.parse_args()
    # NOTE(review): `convert_cvt_checkpoint` must be the conversion function
    # defined above in this module — confirm its name.
    convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
from __future__ import annotations
import csv
import requests
from bsa import BeautifulSoup
def lowercase_ ( _lowerCamelCase : str = ""):
lowercase__ : str = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
lowercase__ : int = BeautifulSoup(requests.get(_lowerCamelCase).text , "html.parser")
lowercase__ : int = soup.find_all("td" , attrs="titleColumn")
lowercase__ : Any = soup.find_all("td" , class_="ratingColumn imdbRating")
return {
title.a.text: float(rating.strong.text)
for title, rating in zip(_lowerCamelCase , _lowerCamelCase)
}
def lowercase_ ( _lowerCamelCase : str = "IMDb_Top_250_Movies.csv"):
lowercase__ : Dict = get_imdb_top_aaa_movies()
with open(_lowerCamelCase , "w" , newline="") as out_file:
lowercase__ : Optional[int] = csv.writer(_lowerCamelCase)
writer.writerow(["Movie title", "IMDb rating"])
for title, rating in movies.items():
writer.writerow([title, rating])
if __name__ == "__main__":
write_movies()
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import scaffolding for the Electra model family.  Sub-modules are
# registered in `_import_structure` and only actually imported on first
# attribute access via `_LazyModule`, keeping `import transformers` cheap.
# Fix vs. the previous version: each optional list overwrote a single reused
# module-level name instead of extending the structure dict, and the final
# `_LazyModule(...)` call referenced a name that was never defined.
_import_structure = {
    "configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
    "tokenization_electra": ["ElectraTokenizer"],
}

# Fast tokenizer: only when the `tokenizers` package is installed.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]

# PyTorch models.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_electra"] = [
        "ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ElectraForCausalLM",
        "ElectraForMaskedLM",
        "ElectraForMultipleChoice",
        "ElectraForPreTraining",
        "ElectraForQuestionAnswering",
        "ElectraForSequenceClassification",
        "ElectraForTokenClassification",
        "ElectraModel",
        "ElectraPreTrainedModel",
        "load_tf_weights_in_electra",
    ]

# TensorFlow models.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_electra"] = [
        "TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFElectraForMaskedLM",
        "TFElectraForMultipleChoice",
        "TFElectraForPreTraining",
        "TFElectraForQuestionAnswering",
        "TFElectraForSequenceClassification",
        "TFElectraForTokenClassification",
        "TFElectraModel",
        "TFElectraPreTrainedModel",
    ]

# Flax models.
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_electra"] = [
        "FlaxElectraForCausalLM",
        "FlaxElectraForMaskedLM",
        "FlaxElectraForMultipleChoice",
        "FlaxElectraForPreTraining",
        "FlaxElectraForQuestionAnswering",
        "FlaxElectraForSequenceClassification",
        "FlaxElectraForTokenClassification",
        "FlaxElectraModel",
        "FlaxElectraPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers / IDEs only; mirrors the
    # structure registered above.
    from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
    from .tokenization_electra import ElectraTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_electra_fast import ElectraTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_electra import (
            ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            ElectraForCausalLM,
            ElectraForMaskedLM,
            ElectraForMultipleChoice,
            ElectraForPreTraining,
            ElectraForQuestionAnswering,
            ElectraForSequenceClassification,
            ElectraForTokenClassification,
            ElectraModel,
            ElectraPreTrainedModel,
            load_tf_weights_in_electra,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_electra import (
            TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFElectraForMaskedLM,
            TFElectraForMultipleChoice,
            TFElectraForPreTraining,
            TFElectraForQuestionAnswering,
            TFElectraForSequenceClassification,
            TFElectraForTokenClassification,
            TFElectraModel,
            TFElectraPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_electra import (
            FlaxElectraForCausalLM,
            FlaxElectraForMaskedLM,
            FlaxElectraForMultipleChoice,
            FlaxElectraForPreTraining,
            FlaxElectraForQuestionAnswering,
            FlaxElectraForSequenceClassification,
            FlaxElectraForTokenClassification,
            FlaxElectraModel,
            FlaxElectraPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy over the structure.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse

import intel_extension_for_pytorch as ipex
import torch

from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline

# Stable Diffusion inference on CPU with IPEX (bfloat16) optimization.
# Fix vs. the previous version: every assignment targeted one reused
# module-level name while the code below referenced `parser`, `args`, `pipe`,
# etc. (all undefined), and `torch.bfloataa` is not a real dtype
# (restored to torch.bfloat16).
parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex: trace the UNet with a representative sample input
# (latents, timestep, text-encoder hidden states) when supported.
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    # Older IPEX versions do not accept sample_input; fall back.
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute with a fixed seed so runs are reproducible
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}
if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save("generated.png")
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case_ ( __A ,unittest.TestCase ):
__A : Union[str, Any] = LEDTokenizer
__A : Union[str, Any] = LEDTokenizerFast
__A : Optional[Any] = True
def __UpperCamelCase ( self : List[str] ) -> Union[str, Any]:
super().setUp()
lowercase__ : List[str] = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
lowercase__ : Optional[int] = dict(zip(lowercase_ , range(len(lowercase_ ) ) ) )
lowercase__ : str = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
lowercase__ : Tuple = {"unk_token": "<unk>"}
lowercase__ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowercase__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(lowercase_ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(lowercase_ ) )
def __UpperCamelCase ( self : int , **lowercase_ : str ) -> List[Any]:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase_ )
def __UpperCamelCase ( self : List[Any] , **lowercase_ : Any ) -> List[Any]:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowercase_ )
def __UpperCamelCase ( self : str , lowercase_ : Any ) -> Tuple:
return "lower newer", "lower newer"
@cached_property
def __UpperCamelCase ( self : Tuple ) -> Optional[Any]:
return LEDTokenizer.from_pretrained("allenai/led-base-16384" )
@cached_property
def __UpperCamelCase ( self : Tuple ) -> int:
return LEDTokenizerFast.from_pretrained("allenai/led-base-16384" )
@require_torch
def __UpperCamelCase ( self : int ) -> List[Any]:
lowercase__ : Union[str, Any] = ["A long paragraph for summarization.", "Another paragraph for summarization."]
lowercase__ : str = [0, 2_50, 2_51, 1_78_18, 13, 3_91_86, 19_38, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase__ : Dict = tokenizer(lowercase_ , max_length=len(lowercase_ ) , padding=lowercase_ , return_tensors="pt" )
self.assertIsInstance(lowercase_ , lowercase_ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
lowercase__ : Union[str, Any] = batch.input_ids.tolist()[0]
self.assertListEqual(lowercase_ , lowercase_ )
@require_torch
def __UpperCamelCase ( self : List[str] ) -> Tuple:
lowercase__ : Dict = ["A long paragraph for summarization.", "Another paragraph for summarization."]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase__ : Optional[int] = tokenizer(lowercase_ , padding=lowercase_ , return_tensors="pt" )
self.assertIn("input_ids" , lowercase_ )
self.assertIn("attention_mask" , lowercase_ )
self.assertNotIn("labels" , lowercase_ )
self.assertNotIn("decoder_attention_mask" , lowercase_ )
@require_torch
def __UpperCamelCase ( self : Optional[Any] ) -> Any:
lowercase__ : Dict = [
"Summary of the text.",
"Another summary.",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase__ : Dict = tokenizer(text_target=lowercase_ , max_length=32 , padding="max_length" , return_tensors="pt" )
self.assertEqual(32 , targets["input_ids"].shape[1] )
@require_torch
def __UpperCamelCase ( self : Optional[int] ) -> Tuple:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase__ : int = tokenizer(
["I am a small frog" * 10_24, "I am a small frog"] , padding=lowercase_ , truncation=lowercase_ , return_tensors="pt" )
self.assertIsInstance(lowercase_ , lowercase_ )
self.assertEqual(batch.input_ids.shape , (2, 51_22) )
@require_torch
def __UpperCamelCase ( self : List[str] ) -> Any:
lowercase__ : Union[str, Any] = ["A long paragraph for summarization."]
lowercase__ : List[Any] = [
"Summary of the text.",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase__ : List[Any] = tokenizer(lowercase_ , return_tensors="pt" )
lowercase__ : Dict = tokenizer(text_target=lowercase_ , return_tensors="pt" )
lowercase__ : Optional[int] = inputs["input_ids"]
lowercase__ : str = targets["input_ids"]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def __UpperCamelCase ( self : Union[str, Any] ) -> Dict:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase__ : int = ["Summary of the text.", "Another summary."]
lowercase__ : List[str] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
lowercase__ : Tuple = tokenizer(lowercase_ , padding=lowercase_ )
lowercase__ : int = [[0] * len(lowercase_ ) for x in encoded_output["input_ids"]]
lowercase__ : Any = tokenizer.pad(lowercase_ )
self.assertSequenceEqual(outputs["global_attention_mask"] , lowercase_ )
    # Intentionally a no-op override; presumably disables an inherited test that
    # does not apply to this tokenizer — TODO confirm against the shared tester
    # mixin this class extends.
    def __UpperCamelCase ( self : int ) -> Union[str, Any]:
        pass
def __UpperCamelCase ( self : int ) -> Optional[Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowercase__ : List[Any] = self.rust_tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
lowercase__ : List[str] = self.tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
lowercase__ : List[Any] = "A, <mask> AllenNLP sentence."
lowercase__ : Tuple = tokenizer_r.encode_plus(lowercase_ , add_special_tokens=lowercase_ , return_token_type_ids=lowercase_ )
lowercase__ : List[str] = tokenizer_p.encode_plus(lowercase_ , add_special_tokens=lowercase_ , return_token_type_ids=lowercase_ )
self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
lowercase__ : Any = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
lowercase__ : Optional[Any] = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
self.assertSequenceEqual(tokens_p["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
lowercase_ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
self.assertSequenceEqual(
lowercase_ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
# --- (unreadable dataset-join residue removed)
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class snake_case_ ( TestCasePlus ):
    """Integration test: fine-tune a tiny BERT2BERT encoder-decoder with the
    seq2seq trainer on a 1% slice of CNN/DailyMail.

    NOTE(review): base class restored from the undefined ``__A`` placeholder to
    TestCasePlus (imported above and the provider of ``get_auto_remove_tmp_dir``).
    Locals previously bound to throwaway names while being read below (NameError)
    have been restored from their read sites.
    """

    @slow
    @require_torch
    def test_finetune_bert2bert(self) -> None:
        """End-to-end smoke test: data prep, training and periodic evaluation."""
        bertabert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

        # Wire the seq2seq special tokens / sizes onto the combined config.
        # NOTE(review): the attribute targets below were lost in obfuscation and
        # are restored following the upstream transformers test — confirm.
        bertabert.config.vocab_size = bertabert.config.encoder.vocab_size
        bertabert.config.eos_token_id = tokenizer.sep_token_id
        bertabert.config.decoder_start_token_id = tokenizer.cls_token_id
        bertabert.config.max_length = 1_28

        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")
        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))
        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=5_12)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=1_28)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask
            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            # Mask padding positions with -100 so the loss ignores them.
            batch["labels"] = [
                [-1_00 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask

            assert all(len(x) == 5_12 for x in inputs.input_ids)
            assert all(len(x) == 1_28 for x in outputs.input_ids)

            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions
            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)
            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)
            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        train_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        val_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        output_dir = self.get_auto_remove_tmp_dir()
        training_args = SeqaSeqTrainingArguments(
            output_dir=output_dir,
            per_device_train_batch_size=batch_size,
            per_device_eval_batch_size=batch_size,
            predict_with_generate=True,
            evaluation_strategy="steps",
            do_train=True,
            do_eval=True,
            warmup_steps=0,
            eval_steps=2,
            logging_steps=2,
        )

        # instantiate trainer
        trainer = SeqaSeqTrainer(
            model=bertabert,
            args=training_args,
            compute_metrics=_compute_metrics,
            train_dataset=train_dataset,
            eval_dataset=val_dataset,
            tokenizer=tokenizer,
        )
        # start training
        trainer.train()
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
# NOTE(review): both statements previously bound the same placeholder name, so
# `logger` (used by the pipeline's __call__) and TARGET_FEATURE_LENGTH (the mel
# segment length used throughout) were undefined; restored from their read sites.
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

TARGET_FEATURE_LENGTH = 256
class snake_case_ ( DiffusionPipeline ):
    """Spectrogram-diffusion pipeline: encodes note tokens, denoises mel
    spectrogram segments with a T5 film decoder, and optionally vocodes the
    result with MelGAN.

    NOTE(review): base class restored from the undefined ``__A`` placeholder to
    DiffusionPipeline (imported above; provider of ``register_modules``,
    ``progress_bar`` and ``device``). Method names restored from their
    ``self.scale_features`` / ``self.encode`` / ``self.decode`` call sites —
    previously every method shared one colliding name.
    """

    # Restored from an unusable placeholder class attribute; diffusers uses this
    # to mark components that may be None.
    _optional_components = ["melgan"]

    def __init__(
        self,
        notes_encoder: SpectrogramNotesEncoder,
        continuous_encoder: SpectrogramContEncoder,
        decoder: TaFilmDecoder,
        scheduler: DDPMScheduler,
        melgan: OnnxRuntimeModel if is_onnx_available() else Any,
    ) -> None:
        super().__init__()

        # From MELGAN
        self.min_value = math.log(1E-5)  # Matches MelGAN training.
        self.max_value = 4.0  # Largest value for most examples
        self.n_dims = 1_28

        self.register_modules(
            notes_encoder=notes_encoder,
            continuous_encoder=continuous_encoder,
            decoder=decoder,
            scheduler=scheduler,
            melgan=melgan,
        )

    def scale_features(self, features, output_range=(-1.0, 1.0), clip=False):
        """Linearly map ``features`` from [min_value, max_value] to ``output_range``."""
        min_out, max_out = output_range
        if clip:
            features = torch.clip(features, self.min_value, self.max_value)
        # Scale to [0, 1].
        zero_one = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out

    def scale_to_features(self, outputs, input_range=(-1.0, 1.0), clip=False):
        """Inverse of :meth:`scale_features`: map from ``input_range`` back to
        the [min_value, max_value] mel range."""
        min_out, max_out = input_range
        outputs = torch.clip(outputs, min_out, max_out) if clip else outputs
        # Scale to [0, 1].
        zero_one = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value

    def encode(self, input_tokens, continuous_inputs, continuous_mask):
        """Run both encoders and return their (encoding, mask) pairs."""
        # Token id 0 is treated as padding.
        tokens_mask = input_tokens > 0
        tokens_encoded, tokens_mask = self.notes_encoder(
            encoder_input_tokens=input_tokens, encoder_inputs_mask=tokens_mask
        )

        continuous_encoded, continuous_mask = self.continuous_encoder(
            encoder_inputs=continuous_inputs, encoder_inputs_mask=continuous_mask
        )

        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]

    def decode(self, encodings_and_masks, input_tokens, noise_time):
        """Predict decoder logits for ``input_tokens`` at diffusion time ``noise_time``."""
        timesteps = noise_time
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=input_tokens.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(input_tokens.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(input_tokens.shape[0], dtype=timesteps.dtype, device=timesteps.device)

        logits = self.decoder(
            encodings_and_masks=encodings_and_masks, decoder_input_tokens=input_tokens, decoder_noise_time=timesteps
        )
        return logits

    @torch.no_grad()
    def __call__(
        self,
        input_tokens: List[List[int]],
        generator: Optional[torch.Generator] = None,
        num_inference_steps: int = 1_00,
        return_dict: bool = True,
        output_type: str = "numpy",
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ) -> Union[AudioPipelineOutput, Tuple]:
        """Denoise one mel segment per entry of ``input_tokens`` and vocode the
        concatenated result when ``output_type == "numpy"``."""
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        pred_mel = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims], dtype=np.float32)
        full_pred_mel = np.zeros([1, 0, self.n_dims], np.float32)
        # NOTE(review): boolean mask dtype restored from an undefined placeholder.
        ones = torch.ones((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)

        for i, encoder_input_tokens in enumerate(input_tokens):
            if i == 0:
                encoder_continuous_inputs = torch.from_numpy(pred_mel[:1].copy()).to(
                    device=self.device, dtype=self.decoder.dtype
                )
                # The first chunk has no previous context.
                encoder_continuous_mask = torch.zeros((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                encoder_continuous_mask = ones

            encoder_continuous_inputs = self.scale_features(
                encoder_continuous_inputs, output_range=[-1.0, 1.0], clip=True
            )

            encodings_and_masks = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens]).to(device=self.device),
                continuous_inputs=encoder_continuous_inputs,
                continuous_mask=encoder_continuous_mask,
            )

            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            x = randn_tensor(
                shape=encoder_continuous_inputs.shape,
                generator=generator,
                device=self.device,
                dtype=self.decoder.dtype,
            )

            # set step values
            self.scheduler.set_timesteps(num_inference_steps)

            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
                output = self.decode(
                    encodings_and_masks=encodings_and_masks,
                    input_tokens=x,
                    noise_time=t / self.scheduler.config.num_train_timesteps,
                )

                # Compute previous output: x_t -> x_t-1
                x = self.scheduler.step(output, t, x, generator=generator).prev_sample

            mel = self.scale_to_features(x, input_range=[-1.0, 1.0])
            encoder_continuous_inputs = mel[:1]
            pred_mel = mel.cpu().float().numpy()

            full_pred_mel = np.concatenate([full_pred_mel, pred_mel[:1]], axis=1)

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, full_pred_mel)
            logger.info("Generated segment", i)

        if output_type == "numpy" and not is_onnx_available():
            raise ValueError(
                "Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'."
            )
        elif output_type == "numpy" and self.melgan is None:
            raise ValueError(
                "Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'."
            )

        if output_type == "numpy":
            output = self.melgan(input_features=full_pred_mel.astype(np.float32))
        else:
            output = full_pred_mel

        if not return_dict:
            return (output,)

        return AudioPipelineOutput(audios=output)
# --- (unreadable dataset-join residue removed)
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class snake_case_ ( ProcessorMixin ):
    """Processor that combines a LayoutLMv3 image processor (OCR + pixel values)
    with a LayoutLMv3 tokenizer into a single callable.

    NOTE(review): base class restored from the undefined ``__A`` placeholder to
    ProcessorMixin (imported above; provider of ``tokenizer``/``image_processor``
    attributes wired up by ``super().__init__``). Class attribute and method
    names restored from their call sites and the deprecation messages.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        # Accept the deprecated kwarg as a fallback for the new one.
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """Run the image processor, then the tokenizer, and merge their outputs."""
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class snake_case_ ( unittest.TestCase ):
    """Tests for the zero-shot audio classification pipeline.

    NOTE(review): result locals were assigned to throwaway names while `output`
    was read below (NameError); methods renamed so unittest discovers them.
    """

    @require_torch
    def test_small_model_pt(self) -> None:
        audio_classifier = pipeline(
            task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused"
        )
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.5_01, "label": "Sound of a dog"}, {"score": 0.4_99, "label": "Sound of vaccum cleaner"}],
        )

    @unittest.skip("No models are available in TF")
    def test_small_model_tf(self) -> None:
        pass

    @slow
    @require_torch
    def test_large_model_pt(self) -> None:
        audio_classifier = pipeline(
            task="zero-shot-audio-classification",
            model="laion/clap-htsat-unfused",
        )
        # This is an audio of a dog
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.9_99, "label": "Sound of a dog"},
                {"score": 0.0_01, "label": "Sound of vaccum cleaner"},
            ],
        )

        output = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.9_99, "label": "Sound of a dog"},
                    {"score": 0.0_01, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

        output = audio_classifier(
            [audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"], batch_size=5
        )
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.9_99, "label": "Sound of a dog"},
                    {"score": 0.0_01, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

    @unittest.skip("No models are available in TF")
    def test_large_model_tf(self) -> None:
        pass
# --- (unreadable dataset-join residue removed)
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
UpperCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
class snake_case_ ( DiffusionPipeline ):
    """Text-to-image Stable Diffusion variant that splices a fixed-size (64x64)
    reference noise tensor into the latents so the same seed yields similar
    images across output resolutions ("seed resize").

    NOTE(review): base class restored from the undefined ``__A`` placeholder to
    DiffusionPipeline (imported above; provider of ``register_modules``,
    ``progress_bar`` and ``numpy_to_pil``). Locals previously bound to throwaway
    names while being read below (NameError) are restored from their read sites.
    """

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNetaDConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ) -> None:
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto") -> None:
        """Compute attention in chunks of ``slice_size`` to save memory."""
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self) -> None:
        """Restore full (unsliced) attention computation."""
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        height: int = 5_12,
        width: int = 5_12,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        text_embeddings: Optional[torch.FloatTensor] = None,
        **kwargs,
    ):
        """Generate images for ``prompt``; returns a StableDiffusionPipelineOutput
        (or a tuple when ``return_dict`` is False)."""
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]

        if text_embeddings is None:
            text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""]
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(batch_size, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_shape_reference = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device="cpu", dtype=latents_dtype
                ).to(self.device)
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device=self.device, dtype=latents_dtype
                )
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            # NOTE(review): this branch reads `latents_reference`, which is not a
            # parameter of this pipeline — the same defect exists in the upstream
            # community pipeline; kept as-is to preserve behavior. TODO confirm.
            if latents_reference.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents_reference = latents_reference.to(self.device)
            latents = latents.to(self.device)

        # This is the key part of the pipeline where we
        # try to ensure that the generated images w/ the same seed
        # but different sizes actually result in similar images
        dx = (latents_shape[3] - latents_shape_reference[3]) // 2
        dy = (latents_shape[2] - latents_shape_reference[2]) // 2
        w = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
        h = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
        tx = 0 if dx < 0 else dx
        ty = 0 if dy < 0 else dy
        dx = max(-dx, 0)
        dy = max(-dy, 0)
        # import pdb
        # pdb.set_trace()
        latents[:, :, ty : ty + h, tx : tx + w] = latents_reference[:, :, dy : dy + h, dx : dx + w]

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        latents = 1 / 0.1_82_15 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if self.safety_checker is not None:
            safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(
                self.device
            )
            image, has_nsfw_concept = self.safety_checker(
                images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype)
            )
        else:
            has_nsfw_concept = None

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, has_nsfw_concept)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
import operator
def lowercase_ ( _lowerCamelCase : list , _lowerCamelCase : bool = False , _lowerCamelCase : list | None = None):
lowercase__ : int = operator.lt if reverse else operator.gt
lowercase__ : str = solution or []
if not arr:
return solution
lowercase__ : List[str] = [arr.pop(0)]
for i, item in enumerate(_lowerCamelCase):
if _operator(_lowerCamelCase , sublist[-1]):
sublist.append(_lowerCamelCase)
arr.pop(_lowerCamelCase)
# merging sublist into solution list
if not solution:
solution.extend(_lowerCamelCase)
else:
while sublist:
lowercase__ : str = sublist.pop(0)
for i, xx in enumerate(_lowerCamelCase):
if not _operator(_lowerCamelCase , _lowerCamelCase):
solution.insert(_lowerCamelCase , _lowerCamelCase)
break
else:
solution.append(_lowerCamelCase)
strand_sort(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
# --- (unreadable dataset-join residue removed)
import argparse
import os
import re
UpperCamelCase = '''src/transformers'''
# Pattern that looks at the indentation in a line.
UpperCamelCase = re.compile(R'''^(\s*)\S''')
# Pattern that matches `"key":" and puts `key` in group 0.
UpperCamelCase = re.compile(R'''^\s*"([^"]+)":''')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
UpperCamelCase = re.compile(R'''^\s*_import_structure\["([^"]+)"\]''')
# Pattern that matches `"key",` and puts `key` in group 0.
UpperCamelCase = re.compile(R'''^\s*"([^"]+)",\s*$''')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
UpperCamelCase = re.compile(R'''\[([^\]]+)\]''')
def lowercase_ ( _lowerCamelCase : int):
lowercase__ : str = _re_indent.search(_lowerCamelCase)
return "" if search is None else search.groups()[0]
def lowercase_ ( _lowerCamelCase : List[Any] , _lowerCamelCase : Tuple="" , _lowerCamelCase : Any=None , _lowerCamelCase : Tuple=None):
lowercase__ : Optional[Any] = 0
lowercase__ : Optional[int] = code.split("\n")
if start_prompt is not None:
while not lines[index].startswith(_lowerCamelCase):
index += 1
lowercase__ : str = ["\n".join(lines[:index])]
else:
lowercase__ : List[Any] = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
lowercase__ : Any = [lines[index]]
index += 1
while index < len(_lowerCamelCase) and (end_prompt is None or not lines[index].startswith(_lowerCamelCase)):
if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
if len(_lowerCamelCase) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
current_block.append(lines[index])
blocks.append("\n".join(_lowerCamelCase))
if index < len(_lowerCamelCase) - 1:
lowercase__ : List[str] = [lines[index + 1]]
index += 1
else:
lowercase__ : List[str] = []
else:
blocks.append("\n".join(_lowerCamelCase))
lowercase__ : List[str] = [lines[index]]
else:
current_block.append(lines[index])
index += 1
# Adds current block if it's nonempty.
if len(_lowerCamelCase) > 0:
blocks.append("\n".join(_lowerCamelCase))
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(_lowerCamelCase):
blocks.append("\n".join(lines[index:]))
return blocks
def lowercase_ ( _lowerCamelCase : Optional[int]):
def _inner(_lowerCamelCase : str):
return key(_lowerCamelCase).lower().replace("_" , "")
return _inner
def lowercase_ ( _lowerCamelCase : Tuple , _lowerCamelCase : Any=None):
# If no key is provided, we use a noop.
def noop(_lowerCamelCase : str):
return x
if key is None:
lowercase__ : Any = noop
# Constants are all uppercase, they go first.
lowercase__ : Tuple = [obj for obj in objects if key(_lowerCamelCase).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
lowercase__ : str = [obj for obj in objects if key(_lowerCamelCase)[0].isupper() and not key(_lowerCamelCase).isupper()]
# Functions begin with a lowercase, they go last.
lowercase__ : Any = [obj for obj in objects if not key(_lowerCamelCase)[0].isupper()]
lowercase__ : Dict = ignore_underscore(_lowerCamelCase)
return sorted(_lowerCamelCase , key=_lowerCamelCase) + sorted(_lowerCamelCase , key=_lowerCamelCase) + sorted(_lowerCamelCase , key=_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : str):
# This inner function sort imports between [ ].
def _replace(_lowerCamelCase : List[Any]):
lowercase__ : Optional[Any] = match.groups()[0]
if "," not in imports:
return f'''[{imports}]'''
lowercase__ : Optional[int] = [part.strip().replace("\"" , "") for part in imports.split(",")]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1]) == 0:
lowercase__ : Optional[int] = keys[:-1]
return "[" + ", ".join([f'''"{k}"''' for k in sort_objects(_lowerCamelCase)]) + "]"
lowercase__ : List[Any] = import_statement.split("\n")
if len(_lowerCamelCase) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
lowercase__ : Dict = 2 if lines[1].strip() == "[" else 1
lowercase__ : Optional[Any] = [(i, _re_strip_line.search(_lowerCamelCase).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
lowercase__ : Any = sort_objects(_lowerCamelCase , key=lambda _lowerCamelCase: x[1])
lowercase__ : List[str] = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
elif len(_lowerCamelCase) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1]) is not None:
lowercase__ : Any = _re_bracket_content.sub(_replace , lines[1])
else:
lowercase__ : List[Any] = [part.strip().replace("\"" , "") for part in lines[1].split(",")]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1]) == 0:
lowercase__ : Optional[Any] = keys[:-1]
lowercase__ : Optional[Any] = get_indent(lines[1]) + ", ".join([f'''"{k}"''' for k in sort_objects(_lowerCamelCase)])
return "\n".join(_lowerCamelCase)
else:
# Finally we have to deal with imports fitting on one line
lowercase__ : Any = _re_bracket_content.sub(_replace , _lowerCamelCase)
return import_statement
def lowercase_ ( _lowerCamelCase : Tuple , _lowerCamelCase : List[Any]=True):
with open(_lowerCamelCase , encoding="utf-8") as f:
lowercase__ : Any = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
lowercase__ : List[Any] = split_code_in_indented_blocks(
_lowerCamelCase , start_prompt="_import_structure = {" , end_prompt="if TYPE_CHECKING:")
# We ignore block 0 (everything untils start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(_lowerCamelCase) - 1):
# Check if the block contains some `_import_structure`s thingy to sort.
lowercase__ : Optional[int] = main_blocks[block_idx]
lowercase__ : Any = block.split("\n")
# Get to the start of the imports.
lowercase__ : int = 0
while line_idx < len(_lowerCamelCase) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
lowercase__ : List[str] = len(_lowerCamelCase)
else:
line_idx += 1
if line_idx >= len(_lowerCamelCase):
continue
# Ignore beginning and last line: they don't contain anything.
lowercase__ : str = "\n".join(block_lines[line_idx:-1])
lowercase__ : Optional[Any] = get_indent(block_lines[1])
# Slit the internal block into blocks of indent level 1.
lowercase__ : List[Any] = split_code_in_indented_blocks(_lowerCamelCase , indent_level=_lowerCamelCase)
# We have two categories of import key: list or _import_structure[key].append/extend
lowercase__ : Optional[int] = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
lowercase__ : Dict = [(pattern.search(_lowerCamelCase).groups()[0] if pattern.search(_lowerCamelCase) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
lowercase__ : Optional[int] = [(i, key) for i, key in enumerate(_lowerCamelCase) if key is not None]
lowercase__ : List[Any] = [x[0] for x in sorted(_lowerCamelCase , key=lambda _lowerCamelCase: x[1])]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
lowercase__ : Tuple = 0
lowercase__ : Tuple = []
for i in range(len(_lowerCamelCase)):
if keys[i] is None:
reorderded_blocks.append(internal_blocks[i])
else:
lowercase__ : int = sort_objects_in_import(internal_blocks[sorted_indices[count]])
reorderded_blocks.append(_lowerCamelCase)
count += 1
# And we put our main block back together with its first and last line.
lowercase__ : Any = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]])
if code != "\n".join(_lowerCamelCase):
if check_only:
return True
else:
print(f'''Overwriting {file}.''')
with open(_lowerCamelCase , "w" , encoding="utf-8") as f:
f.write("\n".join(_lowerCamelCase))
def lowercase_ ( _lowerCamelCase : List[Any]=True):
lowercase__ : Optional[int] = []
for root, _, files in os.walk(_lowerCamelCase):
if "__init__.py" in files:
lowercase__ : Optional[int] = sort_imports(os.path.join(_lowerCamelCase , "__init__.py") , check_only=_lowerCamelCase)
if result:
lowercase__ : List[str] = [os.path.join(_lowerCamelCase , "__init__.py")]
if len(_lowerCamelCase) > 0:
raise ValueError(f'''Would overwrite {len(_lowerCamelCase)} files, run `make style`.''')
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
UpperCamelCase = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 87 | import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = R'''
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
or scores for each vocabulary token after SoftMax.
kwargs (`Dict[str, Any]`, *optional*):
Additional stopping criteria specific kwargs.
Return:
`bool`. `False` indicates we should continue, `True` indicates we should stop.
'''
class snake_case_ ( __A ):
@add_start_docstrings(lowercase_ )
def __call__( self : Optional[Any] , lowercase_ : torch.LongTensor , lowercase_ : torch.FloatTensor , **lowercase_ : List[str] ) -> bool:
raise NotImplementedError("StoppingCriteria needs to be subclassed" )
class snake_case_ ( __A ):
def __init__( self : Dict , lowercase_ : int , lowercase_ : Optional[int] = None ) -> List[str]:
lowercase__ : str = max_length
lowercase__ : Optional[int] = max_position_embeddings
@add_start_docstrings(lowercase_ )
def __call__( self : Tuple , lowercase_ : torch.LongTensor , lowercase_ : torch.FloatTensor , **lowercase_ : Union[str, Any] ) -> bool:
lowercase__ : str = input_ids.shape[-1]
lowercase__ : Any = cur_len >= self.max_length
if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
logger.warning_once(
"This is a friendly reminder - the current text generation call will exceed the model's predefined "
F'''maximum length ({self.max_position_embeddings}). Depending on the model, you may observe '''
"exceptions, performance degradation, or nothing at all." )
return is_done
class snake_case_ ( __A ):
def __init__( self : Tuple , lowercase_ : int , lowercase_ : int ) -> List[str]:
warnings.warn(
"The class `MaxNewTokensCriteria` is deprecated. "
F'''Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` '''
"with `max_length = start_length + max_new_tokens` instead." , lowercase_ , )
lowercase__ : Optional[int] = start_length
lowercase__ : str = max_new_tokens
lowercase__ : Tuple = start_length + max_new_tokens
@add_start_docstrings(lowercase_ )
def __call__( self : List[Any] , lowercase_ : torch.LongTensor , lowercase_ : torch.FloatTensor , **lowercase_ : Dict ) -> bool:
return input_ids.shape[-1] >= self.max_length
class snake_case_ ( __A ):
def __init__( self : Tuple , lowercase_ : float , lowercase_ : Optional[float] = None ) -> Dict:
lowercase__ : List[str] = max_time
lowercase__ : Tuple = time.time() if initial_timestamp is None else initial_timestamp
@add_start_docstrings(lowercase_ )
def __call__( self : int , lowercase_ : torch.LongTensor , lowercase_ : torch.FloatTensor , **lowercase_ : Union[str, Any] ) -> bool:
return time.time() - self.initial_timestamp > self.max_time
class snake_case_ ( __A ):
@add_start_docstrings(lowercase_ )
def __call__( self : str , lowercase_ : torch.LongTensor , lowercase_ : torch.FloatTensor , **lowercase_ : List[str] ) -> bool:
return any(criteria(lowercase_ , lowercase_ ) for criteria in self )
@property
def __UpperCamelCase ( self : Optional[Any] ) -> Optional[int]:
for stopping_criterium in self:
if isinstance(lowercase_ , lowercase_ ):
return stopping_criterium.max_length
elif isinstance(lowercase_ , lowercase_ ):
return stopping_criterium.max_length
return None
def lowercase_ ( _lowerCamelCase : StoppingCriteriaList , _lowerCamelCase : int):
lowercase__ : Optional[int] = stopping_criteria.max_length
lowercase__ : str = deepcopy(_lowerCamelCase)
if stopping_max_length is not None and stopping_max_length != max_length:
warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter" , _lowerCamelCase)
elif stopping_max_length is None:
new_stopping_criteria.append(MaxLengthCriteria(max_length=_lowerCamelCase))
return new_stopping_criteria
| 87 | 1 |
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
UpperCamelCase = '''scheduler_config.json'''
class snake_case_ ( __A ):
__A : List[Any] = 1
__A : str = 2
__A : int = 3
__A : Tuple = 4
__A : Dict = 5
@dataclass
class snake_case_ ( __A ):
__A : jnp.ndarray
class snake_case_ :
__A : Optional[Any] = SCHEDULER_CONFIG_NAME
__A : int = ["dtype"]
__A : int = []
__A : List[str] = True
@classmethod
def __UpperCamelCase ( cls : str , lowercase_ : Dict[str, Any] = None , lowercase_ : Optional[str] = None , lowercase_ : Tuple=False , **lowercase_ : Union[str, Any] , ) -> Any:
lowercase__ , lowercase__ : int = cls.load_config(
pretrained_model_name_or_path=lowercase_ , subfolder=lowercase_ , return_unused_kwargs=lowercase_ , **lowercase_ , )
lowercase__ , lowercase__ : Optional[int] = cls.from_config(lowercase_ , return_unused_kwargs=lowercase_ , **lowercase_ )
if hasattr(lowercase_ , "create_state" ) and getattr(lowercase_ , "has_state" , lowercase_ ):
lowercase__ : Optional[Any] = scheduler.create_state()
if return_unused_kwargs:
return scheduler, state, unused_kwargs
return scheduler, state
def __UpperCamelCase ( self : Any , lowercase_ : Union[str, os.PathLike] , lowercase_ : bool = False , **lowercase_ : List[Any] ) -> str:
self.save_config(save_directory=lowercase_ , push_to_hub=lowercase_ , **lowercase_ )
@property
def __UpperCamelCase ( self : Tuple ) -> List[Any]:
return self._get_compatibles()
@classmethod
def __UpperCamelCase ( cls : Optional[int] ) -> Tuple:
lowercase__ : Optional[int] = list(set([cls.__name__] + cls._compatibles ) )
lowercase__ : int = importlib.import_module(__name__.split("." )[0] )
lowercase__ : int = [
getattr(lowercase_ , lowercase_ ) for c in compatible_classes_str if hasattr(lowercase_ , lowercase_ )
]
return compatible_classes
def lowercase_ ( _lowerCamelCase : jnp.ndarray , _lowerCamelCase : Tuple[int]):
assert len(_lowerCamelCase) >= x.ndim
return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(_lowerCamelCase) - x.ndim)) , _lowerCamelCase)
def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : Tuple=0.999 , _lowerCamelCase : Tuple=jnp.floataa):
def alpha_bar(_lowerCamelCase : Dict):
return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2
lowercase__ : Tuple = []
for i in range(_lowerCamelCase):
lowercase__ : Tuple = i / num_diffusion_timesteps
lowercase__ : Any = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(_lowerCamelCase) / alpha_bar(_lowerCamelCase) , _lowerCamelCase))
return jnp.array(_lowerCamelCase , dtype=_lowerCamelCase)
@flax.struct.dataclass
class snake_case_ :
__A : jnp.ndarray
__A : jnp.ndarray
__A : jnp.ndarray
@classmethod
def __UpperCamelCase ( cls : Optional[Any] , lowercase_ : Union[str, Any] ) -> str:
lowercase__ : Optional[int] = scheduler.config
if config.trained_betas is not None:
lowercase__ : List[Any] = jnp.asarray(config.trained_betas , dtype=scheduler.dtype )
elif config.beta_schedule == "linear":
lowercase__ : Tuple = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype )
elif config.beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
lowercase__ : Union[str, Any] = (
jnp.linspace(
config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype )
** 2
)
elif config.beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
lowercase__ : Dict = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype )
else:
raise NotImplementedError(
F'''beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}''' )
lowercase__ : List[Any] = 1.0 - betas
lowercase__ : Optional[Any] = jnp.cumprod(lowercase_ , axis=0 )
return cls(
alphas=lowercase_ , betas=lowercase_ , alphas_cumprod=lowercase_ , )
def lowercase_ ( _lowerCamelCase : CommonSchedulerState , _lowerCamelCase : jnp.ndarray , _lowerCamelCase : jnp.ndarray , _lowerCamelCase : jnp.ndarray):
lowercase__ : List[Any] = state.alphas_cumprod
lowercase__ : Any = alphas_cumprod[timesteps] ** 0.5
lowercase__ : Optional[int] = sqrt_alpha_prod.flatten()
lowercase__ : int = broadcast_to_shape_from_left(_lowerCamelCase , original_samples.shape)
lowercase__ : Dict = (1 - alphas_cumprod[timesteps]) ** 0.5
lowercase__ : Optional[int] = sqrt_one_minus_alpha_prod.flatten()
lowercase__ : Optional[int] = broadcast_to_shape_from_left(_lowerCamelCase , original_samples.shape)
return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def lowercase_ ( _lowerCamelCase : CommonSchedulerState , _lowerCamelCase : jnp.ndarray , _lowerCamelCase : jnp.ndarray , _lowerCamelCase : jnp.ndarray):
lowercase__ , lowercase__ : Dict = get_sqrt_alpha_prod(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
lowercase__ : Union[str, Any] = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
def lowercase_ ( _lowerCamelCase : CommonSchedulerState , _lowerCamelCase : jnp.ndarray , _lowerCamelCase : jnp.ndarray , _lowerCamelCase : jnp.ndarray):
lowercase__ , lowercase__ : Dict = get_sqrt_alpha_prod(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
lowercase__ : Dict = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
return velocity
| 87 | from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def lowercase_ ( _lowerCamelCase : Dict[str, torch.Tensor]):
lowercase__ : Any = []
lowercase__ : Optional[int] = []
lowercase__ : Tuple = []
for rt in rc.restypes:
lowercase__ : Dict = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names])
lowercase__ : str = {name: i for i, name in enumerate(_lowerCamelCase)}
restype_atomaa_to_atomaa_list.append(
[(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types])
restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names])
# Add dummy mapping for restype 'UNK'
restype_atomaa_to_atomaa_list.append([0] * 14)
restype_atomaa_to_atomaa_list.append([0] * 37)
restype_atomaa_mask_list.append([0.0] * 14)
lowercase__ : Union[str, Any] = torch.tensor(
_lowerCamelCase , dtype=torch.intaa , device=protein["aatype"].device , )
lowercase__ : str = torch.tensor(
_lowerCamelCase , dtype=torch.intaa , device=protein["aatype"].device , )
lowercase__ : List[str] = torch.tensor(
_lowerCamelCase , dtype=torch.floataa , device=protein["aatype"].device , )
lowercase__ : str = protein["aatype"].to(torch.long)
# create the mapping for (residx, atom14) --> atom37, i.e. an array
# with shape (num_res, 14) containing the atom37 indices for this protein
lowercase__ : Dict = restype_atomaa_to_atomaa[protein_aatype]
lowercase__ : str = restype_atomaa_mask[protein_aatype]
lowercase__ : List[Any] = residx_atomaa_mask
lowercase__ : Optional[Any] = residx_atomaa_to_atomaa.long()
# create the gather indices for mapping back
lowercase__ : str = restype_atomaa_to_atomaa[protein_aatype]
lowercase__ : str = residx_atomaa_to_atomaa.long()
# create the corresponding mask
lowercase__ : Optional[Any] = torch.zeros([21, 37] , dtype=torch.floataa , device=protein["aatype"].device)
for restype, restype_letter in enumerate(rc.restypes):
lowercase__ : Tuple = rc.restype_atoa[restype_letter]
lowercase__ : List[Any] = rc.residue_atoms[restype_name]
for atom_name in atom_names:
lowercase__ : Optional[int] = rc.atom_order[atom_name]
lowercase__ : Tuple = 1
lowercase__ : Dict = restype_atomaa_mask[protein_aatype]
lowercase__ : Any = residx_atomaa_mask
return protein
def lowercase_ ( _lowerCamelCase : Dict[str, torch.Tensor]):
lowercase__ : Tuple = tree_map(lambda _lowerCamelCase: torch.tensor(_lowerCamelCase , device=batch["aatype"].device) , _lowerCamelCase , np.ndarray)
lowercase__ : List[str] = tensor_tree_map(lambda _lowerCamelCase: np.array(_lowerCamelCase) , make_atomaa_masks(_lowerCamelCase))
return out
| 87 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.