| code (string, 86-54.5k chars) | code_codestyle (int64, 0-371) | style_context (string, 87-49.2k chars) | style_context_codestyle (int64, 0-349) | label (int64, 0-1) |
|---|---|---|---|---|
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
A_ : Optional[Any] = 16
A_ : int = 32
def A ( snake_case__ , snake_case__ = 16 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained("""bert-base-cased""" )
SCREAMING_SNAKE_CASE__ = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(snake_case__ ):
# max_length=None => use the model max length (it's actually the default)
SCREAMING_SNAKE_CASE__ = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
SCREAMING_SNAKE_CASE__ = datasets.map(
_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
SCREAMING_SNAKE_CASE__ = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(snake_case__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
SCREAMING_SNAKE_CASE__ = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
SCREAMING_SNAKE_CASE__ = 16
elif accelerator.mixed_precision != "no":
SCREAMING_SNAKE_CASE__ = 8
else:
SCREAMING_SNAKE_CASE__ = None
return tokenizer.pad(
_SCREAMING_SNAKE_CASE , padding="""longest""" , max_length=_SCREAMING_SNAKE_CASE , pad_to_multiple_of=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" , )
# Instantiate dataloaders.
SCREAMING_SNAKE_CASE__ = DataLoader(
tokenized_datasets["""train"""] , shuffle=_SCREAMING_SNAKE_CASE , collate_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE__ = DataLoader(
tokenized_datasets["""validation"""] , shuffle=_SCREAMING_SNAKE_CASE , collate_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
A_ : List[Any] = mocked_dataloaders # noqa: F811
def A ( snake_case__ , snake_case__ ):
'''simple docstring'''
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , _SCREAMING_SNAKE_CASE ) == "1":
SCREAMING_SNAKE_CASE__ = 2
# New Code #
SCREAMING_SNAKE_CASE__ = int(args.gradient_accumulation_steps )
# Initialize accelerator
SCREAMING_SNAKE_CASE__ = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=_SCREAMING_SNAKE_CASE )
if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
raise NotImplementedError(
"""Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`""" )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
SCREAMING_SNAKE_CASE__ = config["""lr"""]
SCREAMING_SNAKE_CASE__ = int(config["""num_epochs"""] )
SCREAMING_SNAKE_CASE__ = int(config["""seed"""] )
SCREAMING_SNAKE_CASE__ = int(config["""batch_size"""] )
SCREAMING_SNAKE_CASE__ = evaluate.load("""glue""" , """mrpc""" )
set_seed(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = get_dataloaders(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
SCREAMING_SNAKE_CASE__ = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=_SCREAMING_SNAKE_CASE )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
SCREAMING_SNAKE_CASE__ = model.to(accelerator.device )
# Instantiate optimizer
SCREAMING_SNAKE_CASE__ = AdamW(params=model.parameters() , lr=_SCREAMING_SNAKE_CASE )
# Instantiate scheduler
SCREAMING_SNAKE_CASE__ = get_linear_schedule_with_warmup(
optimizer=_SCREAMING_SNAKE_CASE , num_warmup_steps=1_00 , num_training_steps=(len(_SCREAMING_SNAKE_CASE ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.prepare(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Now we train the model
for epoch in range(_SCREAMING_SNAKE_CASE ):
model.train()
for step, batch in enumerate(_SCREAMING_SNAKE_CASE ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(_SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE__ = model(**_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE__ = output.loss
accelerator.backward(_SCREAMING_SNAKE_CASE )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(_SCREAMING_SNAKE_CASE ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ = model(**_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE__ = outputs.logits.argmax(dim=-1 )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=_SCREAMING_SNAKE_CASE , references=_SCREAMING_SNAKE_CASE , )
SCREAMING_SNAKE_CASE__ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , _SCREAMING_SNAKE_CASE )
def A ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
# New Code #
parser.add_argument(
"""--gradient_accumulation_steps""" , type=_SCREAMING_SNAKE_CASE , default=1 , help="""The number of minibatches to be ran before gradients are accumulated.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
SCREAMING_SNAKE_CASE__ = parser.parse_args()
SCREAMING_SNAKE_CASE__ = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
| 165 |
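The heart of the script above is the `accelerator.accumulate` context manager. Below is a minimal self-contained sketch of the same pattern on a toy model; every name in it is illustrative and independent of the script above:

import torch
from accelerate import Accelerator
from torch.utils.data import DataLoader, TensorDataset

accelerator = Accelerator(gradient_accumulation_steps=4)
model = torch.nn.Linear(10, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
dataloader = DataLoader(TensorDataset(torch.randn(128, 10), torch.randn(128, 1)), batch_size=8)
model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

for inputs, targets in dataloader:
    # gradients are synchronized and applied only once every 4 batches
    with accelerator.accumulate(model):
        loss = torch.nn.functional.mse_loss(model(inputs), targets)
        accelerator.backward(loss)
        optimizer.step()
        optimizer.zero_grad()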
def present_value(discount_rate: float, cash_flows: list) -> float:
    """Return the present value of a series of cash flows, rounded to 2 decimals."""
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative")
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty")
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows)
    )
    return round(present_value, ndigits=2)


if __name__ == "__main__":
    import doctest

    doctest.testmod() | 341 | 0 |
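A quick usage sketch for the `present_value` helper above; the cash-flow figures are illustrative:

# three yearly cash flows of 100, discounted at 5% (first flow at t=0)
print(present_value(0.05, [100, 100, 100]))  # 285.94
print(present_value(0.0, [100, 100, 100]))   # 300.0 (no discounting)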
import importlib.metadata
import operator
import re
import sys
from typing import Optional

from packaging import version


ops = {
    "<": operator.lt,
    "<=": operator.le,
    "==": operator.eq,
    "!=": operator.ne,
    ">=": operator.ge,
    ">": operator.gt,
}


def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            f" reinstalling {pkg}."
        )
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}"
        )


def require_version(requirement: str, hint: Optional[str] = None) -> None:
    """Check that the installed version of the given requirement is satisfied, raising on failure."""
    hint = f"\n{hint}" if hint is not None else ""

    # non-versioned check
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23, but"
                f" got {requirement}"
            )
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23,"
                    f" but got {requirement}"
                )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}")

    # special case: compare against the running interpreter
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}"
        )

    # check that the right version is installed if a version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)


def require_version_core(requirement):
    """require_version wrapper which emits a core-specific hint on failure."""
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint) | 196 |
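A brief usage sketch of the version checker above:

require_version("torch>=1.10,<3.0")      # raises ImportError if the installed torch is outside the range
require_version("python>=3.8")           # special-cased check against the running interpreter
require_version_core("packaging>=20.0")  # same check, but failures carry the transformers dev-install hint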
import re


def is_sri_lankan_phone_number(phone: str) -> bool:
    """Determine whether the given string is a valid Sri Lankan mobile phone number."""
    pattern = re.compile(
        r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$"
    )
    return bool(pattern.search(phone))


if __name__ == "__main__":
    phone = "0094702343221"
    print(is_sri_lankan_phone_number(phone)) | 196 | 1 |
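A few illustrative inputs for the validator above:

print(is_sri_lankan_phone_number("+94773283048"))  # True
print(is_sri_lankan_phone_number("0718382399"))    # True
print(is_sri_lankan_phone_number("0912343221"))    # False (the network code must start with 7)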
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_deit"] = ["DeiTFeatureExtractor"]
    _import_structure["image_processing_deit"] = ["DeiTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_deit"] = [
        "DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DeiTForImageClassification",
        "DeiTForImageClassificationWithTeacher",
        "DeiTForMaskedImageModeling",
        "DeiTModel",
        "DeiTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_deit"] = [
        "TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDeiTForImageClassification",
        "TFDeiTForImageClassificationWithTeacher",
        "TFDeiTForMaskedImageModeling",
        "TFDeiTModel",
        "TFDeiTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 7 |
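All of these `__init__` modules rely on the same lazy-import pattern. Below is a minimal standalone sketch of the idea; it is deliberately much simpler than the real `_LazyModule` and assumes the package's submodules are importable by name:

import importlib
import types


class LazyModule(types.ModuleType):
    """Defer submodule imports until one of their attributes is first accessed."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # map exported attribute -> submodule that actually defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        module_name = self._attr_to_module.get(attr)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        # the submodule is imported only now, on first access
        module = importlib.import_module(f"{self.__name__}.{module_name}")
        return getattr(module, attr)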
import torch

from diffusers import DDIMParallelScheduler

from .test_schedulers import SchedulerCommonTest


class DDIMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0

        model = self.dummy_model()
        sample = self.dummy_sample_deter

        scheduler.set_timesteps(num_inference_steps)

        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample

        return sample

    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3

    def test_full_loop_no_noise(self):
        sample = self.full_loop()

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify a different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify a different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3
| 149 | 0 |
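For orientation, a minimal standalone sketch of driving `DDIMParallelScheduler` outside the test harness; the random tensors here merely stand in for a real denoising model:

import torch
from diffusers import DDIMParallelScheduler

scheduler = DDIMParallelScheduler(num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02)
scheduler.set_timesteps(10)

sample = torch.randn(1, 3, 8, 8)
for t in scheduler.timesteps:
    residual = torch.randn_like(sample)  # stand-in for model(sample, t)
    sample = scheduler.step(residual, t, sample, 0.0).prev_sample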
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase_ : Union[str, Any] = {
"""configuration_blenderbot""": [
"""BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlenderbotConfig""",
"""BlenderbotOnnxConfig""",
],
"""tokenization_blenderbot""": ["""BlenderbotTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : int = ["""BlenderbotTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : str = [
"""BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlenderbotForCausalLM""",
"""BlenderbotForConditionalGeneration""",
"""BlenderbotModel""",
"""BlenderbotPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : str = [
"""TFBlenderbotForConditionalGeneration""",
"""TFBlenderbotModel""",
"""TFBlenderbotPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Dict = [
"""FlaxBlenderbotForConditionalGeneration""",
"""FlaxBlenderbotModel""",
"""FlaxBlenderbotPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 356 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = ["image_processor", "feature_extractor"]
__UpperCamelCase = "TvltImageProcessor"
__UpperCamelCase = "TvltFeatureExtractor"
def __init__( self : int , lowercase_ : Optional[Any] , lowercase_ : Optional[Any]):
'''simple docstring'''
super().__init__(image_processor=lowercase_ , feature_extractor=lowercase_)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = image_processor
SCREAMING_SNAKE_CASE_ : Optional[Any] = feature_extractor
def __call__( self : Any , lowercase_ : str=None , lowercase_ : Optional[Any]=None , lowercase_ : Optional[Any]=None , lowercase_ : str=None , lowercase_ : int=False , lowercase_ : Union[str, Any]=False , *lowercase_ : List[Any] , **lowercase_ : List[str] , ):
'''simple docstring'''
if images is None and audio is None:
raise ValueError('''You need to specify either an `images` or `audio` input to process.''')
SCREAMING_SNAKE_CASE_ : Any = None
if images is not None:
SCREAMING_SNAKE_CASE_ : Tuple = self.image_processor(lowercase_ , mask_pixel=lowercase_ , *lowercase_ , **lowercase_)
if images_mixed is not None:
SCREAMING_SNAKE_CASE_ : Optional[int] = self.image_processor(lowercase_ , is_mixed=lowercase_ , *lowercase_ , **lowercase_)
if audio is not None:
SCREAMING_SNAKE_CASE_ : Any = self.feature_extractor(
lowercase_ , *lowercase_ , sampling_rate=lowercase_ , mask_audio=lowercase_ , **lowercase_)
SCREAMING_SNAKE_CASE_ : List[Any] = {}
if audio is not None:
output_dict.update(lowercase_)
if images is not None:
output_dict.update(lowercase_)
if images_mixed_dict is not None:
output_dict.update(lowercase_)
return output_dict
@property
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.image_processor.model_input_names
SCREAMING_SNAKE_CASE_ : Dict = self.feature_extractor.model_input_names
return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
| 318 | 0 |
from __future__ import annotations

from scipy.special import comb  # type: ignore


class BezierCurve:
    """Bezier curve over the interval [0, 1], defined by a list of control points."""

    def __init__(self, list_of_points: list[tuple[float, float]]) -> None:
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        """Return the values of the Bernstein basis polynomials at parameter t."""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i)
            )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        """Return the (x, y) point on the curve at parameter t."""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."

        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01) -> None:
        """Plot the curve together with its control points using matplotlib."""
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]

        plt.plot(
            to_plot_x, to_plot_y, color="blue", label="Curve of Degree " + str(self.degree)
        )
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    BezierCurve([(1, 2), (3, 5)]).plot_curve()  # degree 1
    BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve()  # degree 2
    BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve()  # degree 3
| 65 |
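A non-plotting usage sketch for the curve class above:

curve = BezierCurve([(0, 0), (5, 5), (5, 0)])
print(curve.basis_function(0.5))         # [0.25, 0.5, 0.25]
print(curve.bezier_curve_function(0.5))  # (3.75, 2.5)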
def check_cycle(graph: dict) -> bool:
    """Return True if the directed graph (an adjacency dict) contains a cycle."""
    visited: set = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk: set = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph
    )


def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    """Recursive DFS that reports a back edge (i.e., a cycle) reachable from vertex."""
    visited.add(vertex)
    rec_stk.add(vertex)

    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            return True

    # The node needs to be removed from the recursion stack before the function ends
    rec_stk.remove(vertex)
    return False


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 174 | 0 |
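A quick usage sketch for the cycle checker above:

acyclic = {0: [1, 2], 1: [2], 2: []}
cyclic = {0: [1], 1: [2], 2: [0]}
print(check_cycle(acyclic))  # False
print(check_cycle(cyclic))   # True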
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", F"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias"""))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.encoder.norm.weight""", """encoder.layernorm.weight"""),
("""transformer.encoder.norm.bias""", """encoder.layernorm.bias"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
]
)
def rename_key(state_dict, old, new):
    """Rename a key of the state dict in place."""
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    """Move the backbone weights to their names in the HF convention."""
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value

    return new_state_dict
def read_in_q_k_v(state_dict):
    prefix = ""

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def resize(image, checkpoint_url):
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))

    return resized_image


def normalize(image):
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    """Copy/paste/tweak the original model's weights into our Table Transformer structure."""
    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")

    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val

    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18",
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        ce_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.4,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
    )

    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: "table", 1: "table rotated"}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000
    )
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion
    filename = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=filename)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)

    outputs = model(pixel_values)

    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]]
        )
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]]
        )
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])

    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
type=str,
choices=[
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth""",
],
help="""URL of the Table Transformer checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 361 |
import socket


def main():
    """Receive a file from the server over a TCP socket and save it locally."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312

    sock.connect((host, port))
    sock.send(b"Hello server!")

    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)

    print("Successfully received the file")
    sock.close()
    print("Connection closed")


if __name__ == "__main__":
    main()
| 115 | 0 |
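For completeness, a minimal matching server sketch; it is not part of the original file, and the source filename is an assumption chosen to mirror the client above:

import socket


def serve_file(filename="File_to_send", port=12312):  # filename is hypothetical
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((socket.gethostname(), port))
    server.listen(1)
    conn, addr = server.accept()
    print("Connected to", addr)
    conn.recv(1024)  # consume the client's greeting
    with open(filename, "rb") as in_file:
        while True:
            chunk = in_file.read(1024)
            if not chunk:
                break
            conn.send(chunk)
    conn.close()
    server.close()


if __name__ == "__main__":
    serve_file()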
from __future__ import annotations

import math
import random
from typing import Any


class MyQueue:
    def __init__(self) -> None:
        self.data: list[Any] = []
        self.head: int = 0
        self.tail: int = 0

    def is_empty(self) -> bool:
        return self.head == self.tail

    def push(self, data: Any) -> None:
        self.data.append(data)
        self.tail = self.tail + 1

    def pop(self) -> Any:
        ret = self.data[self.head]
        self.head = self.head + 1
        return ret

    def count(self) -> int:
        return self.tail - self.head

    def print_queue(self) -> None:
        print(self.data)
        print("**************")
        print(self.data[self.head : self.tail])


class MyNode:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.left: MyNode | None = None
        self.right: MyNode | None = None
        self.height: int = 1

    def get_data(self) -> Any:
        return self.data

    def get_left(self) -> MyNode | None:
        return self.left

    def get_right(self) -> MyNode | None:
        return self.right

    def get_height(self) -> int:
        return self.height

    def set_data(self, data: Any) -> None:
        self.data = data

    def set_left(self, node: MyNode | None) -> None:
        self.left = node

    def set_right(self, node: MyNode | None) -> None:
        self.right = node

    def set_height(self, height: int) -> None:
        self.height = height


def get_height(node: MyNode | None) -> int:
    if node is None:
        return 0
    return node.get_height()


def my_max(a: int, b: int) -> int:
    if a > b:
        return a
    return b


def right_rotation(node: MyNode) -> MyNode:
    """Rotate a left-heavy node to the right; its left child becomes the new root."""
    print("right rotation node:", node.get_data())
    ret = node.get_left()
    assert ret is not None
    node.set_left(ret.get_right())
    ret.set_right(node)
    node.set_height(my_max(get_height(node.get_right()), get_height(node.get_left())) + 1)
    ret.set_height(my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1)
    return ret


def left_rotation(node: MyNode) -> MyNode:
    """Mirror of right_rotation: rotate a right-heavy node to the left."""
    print("left rotation node:", node.get_data())
    ret = node.get_right()
    assert ret is not None
    node.set_right(ret.get_left())
    ret.set_left(node)
    node.set_height(my_max(get_height(node.get_right()), get_height(node.get_left())) + 1)
    ret.set_height(my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1)
    return ret


def lr_rotation(node: MyNode) -> MyNode:
    left_child = node.get_left()
    assert left_child is not None
    node.set_left(left_rotation(left_child))
    return right_rotation(node)


def rl_rotation(node: MyNode) -> MyNode:
    right_child = node.get_right()
    assert right_child is not None
    node.set_right(right_rotation(right_child))
    return left_rotation(node)


def insert_node(node: MyNode | None, data: Any) -> MyNode | None:
    if node is None:
        return MyNode(data)
    if data < node.get_data():
        node.set_left(insert_node(node.get_left(), data))
        if (
            get_height(node.get_left()) - get_height(node.get_right()) == 2
        ):  # an unbalance detected
            left_child = node.get_left()
            assert left_child is not None
            if (
                data < left_child.get_data()
            ):  # new node is the left child of the left child
                node = right_rotation(node)
            else:
                node = lr_rotation(node)
    else:
        node.set_right(insert_node(node.get_right(), data))
        if get_height(node.get_right()) - get_height(node.get_left()) == 2:
            right_child = node.get_right()
            assert right_child is not None
            if data < right_child.get_data():
                node = rl_rotation(node)
            else:
                node = left_rotation(node)
    h = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h)
    return node


def get_right_most(root: MyNode) -> Any:
    while True:
        right_child = root.get_right()
        if right_child is None:
            break
        root = right_child
    return root.get_data()


def get_left_most(root: MyNode) -> Any:
    while True:
        left_child = root.get_left()
        if left_child is None:
            break
        root = left_child
    return root.get_data()


def del_node(root: MyNode, data: Any) -> MyNode | None:
    left_child = root.get_left()
    right_child = root.get_right()
    if root.get_data() == data:
        if left_child is not None and right_child is not None:
            temp_data = get_left_most(right_child)
            root.set_data(temp_data)
            root.set_right(del_node(right_child, temp_data))
        elif left_child is not None:
            root = left_child
        elif right_child is not None:
            root = right_child
        else:
            return None
    elif root.get_data() > data:
        if left_child is None:
            print("No such data")
            return root
        else:
            root.set_left(del_node(left_child, data))
    else:  # root.get_data() < data
        if right_child is None:
            return root
        else:
            root.set_right(del_node(right_child, data))

    if get_height(right_child) - get_height(left_child) == 2:
        assert right_child is not None
        if get_height(right_child.get_right()) > get_height(right_child.get_left()):
            root = left_rotation(root)
        else:
            root = rl_rotation(root)
    elif get_height(right_child) - get_height(left_child) == -2:
        assert left_child is not None
        if get_height(left_child.get_left()) > get_height(left_child.get_right()):
            root = right_rotation(root)
        else:
            root = lr_rotation(root)
    height = my_max(get_height(root.get_right()), get_height(root.get_left())) + 1
    root.set_height(height)
    return root


class AVLtree:
    """A self-balancing binary search tree built from the helpers above."""

    def __init__(self) -> None:
        self.root: MyNode | None = None

    def get_height(self) -> int:
        return get_height(self.root)

    def insert(self, data: Any) -> None:
        print("insert:" + str(data))
        self.root = insert_node(self.root, data)

    def del_node(self, data: Any) -> None:
        print("delete:" + str(data))
        if self.root is None:
            print("Tree is empty!")
            return
        self.root = del_node(self.root, data)

    def __str__(self) -> str:  # a level traversal, gives a more intuitive look on the tree
        output = ""
        q = MyQueue()
        q.push(self.root)
        layer = self.get_height()
        if layer == 0:
            return output
        cnt = 0
        while not q.is_empty():
            node = q.pop()
            space = " " * int(math.pow(2, layer - 1))
            output += space
            if node is None:
                output += "*"
                q.push(None)
                q.push(None)
            else:
                output += str(node.get_data())
                q.push(node.get_left())
                q.push(node.get_right())
            output += space
            cnt = cnt + 1
            for i in range(100):
                if cnt == math.pow(2, i) - 1:
                    layer = layer - 1
                    if layer == 0:
                        output += "\n*************************************"
                        return output
                    output += "\n"
                    break
        output += "\n*************************************"
        return output


def _test() -> None:
    import doctest

    doctest.testmod()


if __name__ == "__main__":
    _test()
    t = AVLtree()
    lst = list(range(10))
    random.shuffle(lst)
    for i in lst:
        t.insert(i)
    print(str(t))
    random.shuffle(lst)
    for i in lst:
        t.del_node(i)
    print(str(t))
| 280 |
import unittest

from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


SPIECE_UNDERLINE = "▁"

SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
class BertGenerationTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertGenerationTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 1002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)

    def test_full_tokenizer(self):
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
            ],
        )

        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
            ],
        )
    @cached_property
    def big_tokenizer(self):
        return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [18536, 2260, 101]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
871,
419,
358,
946,
991,
2521,
452,
358,
1357,
387,
7751,
3536,
112,
985,
456,
126,
865,
938,
5400,
5734,
458,
1368,
467,
786,
2462,
5246,
1159,
633,
865,
4519,
457,
582,
852,
2557,
427,
916,
508,
405,
34324,
497,
391,
408,
11342,
1244,
385,
100,
938,
985,
456,
574,
362,
12597,
3200,
3129,
1172,
]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BertGenerationConfig, BertGenerationEncoder

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
        )

        config = BertGenerationConfig()
        model = BertGenerationEncoder(config)

        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
__A : str = {'input_ids': [[39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114], [448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_A , model_name='google/bert_for_seq_generation_L-24_bbc_encoder' , revision='c817d1fd1be2ffa69431227a1fe320544943d4db' , )
| 280 | 1 |
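The integration test above pins exact token ids for one checkpoint revision. As a lighter illustration of the same round-trip idea, the sketch below encodes and decodes a string with a pretrained tokenizer; it is a minimal sketch that assumes network access to the Hugging Face Hub, and the checkpoint name is only an example.

```python
# Minimal round-trip sketch (assumes `transformers` and Hub access).
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
ids = tokenizer.encode("Hello world", add_special_tokens=False)
print(tokenizer.decode(ids))  # expected: "Hello world"
```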
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def UpperCAmelCase ( lowerCamelCase_ :Callable , lowerCamelCase_ :float , lowerCamelCase_ :float , lowerCamelCase_ :float , lowerCamelCase_ :float ):
'''simple docstring'''
snake_case_ : Dict = int(np.ceil((x_end - xa) / step_size ) )
snake_case_ : List[Any] = np.zeros((n + 1,) )
snake_case_ : Dict = ya
snake_case_ : Optional[int] = xa
for k in range(lowerCamelCase_ ):
snake_case_ : Tuple = y[k] + step_size * ode_func(lowerCamelCase_ , y[k] )
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
| 8 |
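The row above is the explicit (forward) Euler method under obfuscated names. A readable sketch of the same update rule, y[k+1] = y[k] + h * f(x[k], y[k]):

```python
from collections.abc import Callable

import numpy as np

def explicit_euler(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    """Forward Euler for y' = f(x, y), y(x0) = y0.

    >>> float(explicit_euler(lambda x, y: 1.0, 0.0, 0.0, 0.25, 1.0)[-1])
    1.0
    """
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros(n + 1)
    y[0], x = y0, x0
    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])  # one Euler step
        x += step_size
    return y
```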
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__A : int = {
'configuration_whisper': ['WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WhisperConfig', 'WhisperOnnxConfig'],
'feature_extraction_whisper': ['WhisperFeatureExtractor'],
'processing_whisper': ['WhisperProcessor'],
'tokenization_whisper': ['WhisperTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Union[str, Any] = ['WhisperTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Any = [
'WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'WhisperForConditionalGeneration',
'WhisperModel',
'WhisperPreTrainedModel',
'WhisperForAudioClassification',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : List[Any] = [
'TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFWhisperForConditionalGeneration',
'TFWhisperModel',
'TFWhisperPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Union[str, Any] = [
'FlaxWhisperForConditionalGeneration',
'FlaxWhisperModel',
'FlaxWhisperPreTrainedModel',
'FlaxWhisperForAudioClassification',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
__A : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 8 | 1 |
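The Whisper `__init__` above defers heavy framework imports until a symbol is first touched. A stripped-down sketch of that lazy-import pattern using only the standard library (the class and the demo mapping are illustrative, not the `_LazyModule` implementation itself):

```python
import importlib
import types

class LazyModule(types.ModuleType):
    """Resolve exported symbols on first attribute access instead of at import time."""

    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # Map each exported symbol back to the submodule that defines it.
        self._symbol_to_module = {
            symbol: module for module, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, attr: str):
        if attr not in self._symbol_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        value = getattr(importlib.import_module(self._symbol_to_module[attr]), attr)
        setattr(self, attr, value)  # cache so __getattr__ is not hit again
        return value

lazy = LazyModule("demo", {"json": ["dumps"]})
print(lazy.dumps({"lazy": True}))  # "json" is imported only here
```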
"""simple docstring"""
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
_UpperCamelCase : List[str] = logging.getLogger(__name__)
if __name__ == "__main__":
_UpperCamelCase : Optional[Any] = argparse.ArgumentParser(
description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"
)
parser.add_argument(
"--data_file", type=str, default="data/dump.bert-base-uncased.pickle", help="The binarized dataset."
)
parser.add_argument(
"--token_counts_dump", type=str, default="data/token_counts.bert-base-uncased.pickle", help="The dump file."
)
parser.add_argument("--vocab_size", default=3_05_22, type=int)
_UpperCamelCase : List[str] = parser.parse_args()
logger.info(f'''Loading data from {args.data_file}''')
with open(args.data_file, "rb") as fp:
_UpperCamelCase : Dict = pickle.load(fp)
logger.info("Counting occurrences for MLM.")
_UpperCamelCase : Optional[Any] = Counter()
for tk_ids in data:
counter.update(tk_ids)
_UpperCamelCase : Optional[Any] = [0] * args.vocab_size
for k, v in counter.items():
_UpperCamelCase : Tuple = v
logger.info(f'''Dump to {args.token_counts_dump}''')
with open(args.token_counts_dump, "wb") as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
| 77 |
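The script above reduces to three steps: tally token ids with `collections.Counter`, scatter the tallies into a dense vector indexed by token id, and pickle the result. The same logic on synthetic data, without the pickle round trip:

```python
from collections import Counter

vocab_size = 10
data = [[1, 2, 2, 7], [2, 7, 9]]  # toy "binarized dataset": one id list per sequence

counter = Counter()
for token_ids in data:
    counter.update(token_ids)

counts = [0] * vocab_size
for token_id, occurrences in counter.items():
    counts[token_id] = occurrences

print(counts)  # [0, 1, 3, 0, 0, 0, 0, 2, 0, 1]
```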
'''simple docstring'''
from __future__ import annotations
def lowerCAmelCase_ ( _lowerCamelCase: list[int] , _lowerCamelCase: list[int] , _lowerCamelCase: int ):
__SCREAMING_SNAKE_CASE : List[Any] = list(range(len(_lowerCamelCase ) ) )
__SCREAMING_SNAKE_CASE : Optional[Any] = [v / w for v, w in zip(_lowerCamelCase , _lowerCamelCase )]
index.sort(key=lambda i: ratio[i] , reverse=True )
__SCREAMING_SNAKE_CASE : float = 0
__SCREAMING_SNAKE_CASE : list[float] = [0] * len(_lowerCamelCase )
for i in index:
if weight[i] <= capacity:
fractions[i] = 1
max_value += value[i]
capacity -= weight[i]
else:
fractions[i] = capacity / weight[i]
max_value += value[i] * capacity / weight[i]
break
return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
| 112 | 0 |
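A plain-named sketch of the greedy fractional-knapsack routine above: sort items by value-to-weight ratio, take whole items while they fit, then split the first item that does not.

```python
def fractional_knapsack(value: list, weight: list, capacity: float) -> tuple:
    ratio = [v / w for v, w in zip(value, weight)]
    order = sorted(range(len(value)), key=lambda i: ratio[i], reverse=True)
    max_value = 0.0
    fractions = [0.0] * len(value)
    for i in order:
        if weight[i] <= capacity:
            fractions[i] = 1.0  # take the whole item
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]  # take the fitting fraction
            max_value += value[i] * fractions[i]
            break
    return max_value, fractions

print(fractional_knapsack([60, 100, 120], [10, 20, 30], 50))  # (240.0, [1.0, 1.0, 0.666...])
```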
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__lowercase = get_tests_dir("""fixtures/spiece.model""")
@require_sentencepiece
@require_tokenizers
class _A ( _a ,unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase : int = DebertaVaTokenizer
UpperCAmelCase : str = DebertaVaTokenizerFast
UpperCAmelCase : str = True
UpperCAmelCase : Tuple = True
def __snake_case ( self : Tuple):
super().setUp()
# We have a SentencePiece fixture for testing
a : Dict = DebertaVaTokenizer(__UpperCAmelCase , unk_token="<unk>")
tokenizer.save_pretrained(self.tmpdirname)
def __snake_case ( self : Optional[Any] , __UpperCAmelCase : str):
a : Union[str, Any] = "this is a test"
a : List[Any] = "this is a test"
return input_text, output_text
def __snake_case ( self : List[str]):
a : str = "<pad>"
a : Tuple = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCAmelCase) , __UpperCAmelCase)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCAmelCase) , __UpperCAmelCase)
def __snake_case ( self : Optional[int]):
a : List[str] = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , "<pad>")
self.assertEqual(vocab_keys[1] , "<unk>")
self.assertEqual(vocab_keys[-1] , "[PAD]")
self.assertEqual(len(__UpperCAmelCase) , 30001)
def __snake_case ( self : Dict):
self.assertEqual(self.get_tokenizer().vocab_size , 30000)
def __snake_case ( self : Optional[Any]):
# fmt: off
a : Optional[Any] = " \tHeLLo!how \n Are yoU? "
a : Dict = ["▁hello", "!", "how", "▁are", "▁you", "?"]
# fmt: on
a : List[Any] = DebertaVaTokenizer(__UpperCAmelCase , do_lower_case=__UpperCAmelCase)
a : Optional[int] = tokenizer.convert_ids_to_tokens(tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase))
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase)
a : Union[str, Any] = DebertaVaTokenizerFast(__UpperCAmelCase , do_lower_case=__UpperCAmelCase)
a : Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase))
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase)
@unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
def __snake_case ( self : Any):
pass
@unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
def __snake_case ( self : Union[str, Any]):
pass
def __snake_case ( self : Optional[int]):
# fmt: off
a : int = "I was born in 92000, and this is falsé."
a : List[str] = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
a : int = DebertaVaTokenizer(__UpperCAmelCase , split_by_punct=__UpperCAmelCase)
a : int = tokenizer.convert_ids_to_tokens(tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase))
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase)
a : Dict = DebertaVaTokenizerFast(__UpperCAmelCase , split_by_punct=__UpperCAmelCase)
a : Optional[int] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase))
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase)
def __snake_case ( self : int):
# fmt: off
a : Dict = "I was born in 92000, and this is falsé."
a : List[str] = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
a : Union[str, Any] = DebertaVaTokenizer(__UpperCAmelCase , do_lower_case=__UpperCAmelCase , split_by_punct=__UpperCAmelCase)
a : List[str] = tokenizer.convert_ids_to_tokens(tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase))
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase)
a : Tuple = DebertaVaTokenizerFast(__UpperCAmelCase , do_lower_case=__UpperCAmelCase , split_by_punct=__UpperCAmelCase)
a : Union[str, Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase))
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase)
def __snake_case ( self : Union[str, Any]):
# fmt: off
a : Optional[Any] = "I was born in 92000, and this is falsé."
a : Optional[int] = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
# fmt: on
a : Tuple = DebertaVaTokenizer(__UpperCAmelCase , do_lower_case=__UpperCAmelCase , split_by_punct=__UpperCAmelCase)
a : Any = tokenizer.convert_ids_to_tokens(tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase))
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase)
a : str = DebertaVaTokenizerFast(__UpperCAmelCase , do_lower_case=__UpperCAmelCase , split_by_punct=__UpperCAmelCase)
a : Optional[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase))
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase)
def __snake_case ( self : Union[str, Any]):
# fmt: off
a : str = "I was born in 92000, and this is falsé."
a : Optional[int] = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
a : List[Any] = DebertaVaTokenizer(__UpperCAmelCase , do_lower_case=__UpperCAmelCase , split_by_punct=__UpperCAmelCase)
a : str = tokenizer.convert_ids_to_tokens(tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase))
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase)
a : str = DebertaVaTokenizerFast(__UpperCAmelCase , do_lower_case=__UpperCAmelCase , split_by_punct=__UpperCAmelCase)
a : List[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase))
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase)
def __snake_case ( self : Optional[int]):
# fmt: off
a : Union[str, Any] = " \tHeLLo!how \n Are yoU? "
a : Tuple = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"]
# fmt: on
a : List[Any] = DebertaVaTokenizer(__UpperCAmelCase , do_lower_case=__UpperCAmelCase , split_by_punct=__UpperCAmelCase)
a : Optional[int] = tokenizer.convert_ids_to_tokens(tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase))
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase)
a : Optional[int] = DebertaVaTokenizerFast(__UpperCAmelCase , do_lower_case=__UpperCAmelCase , split_by_punct=__UpperCAmelCase)
a : Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase))
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase)
def __snake_case ( self : int):
a : Optional[Any] = self.get_tokenizer()
a : Optional[int] = self.get_rust_tokenizer()
a : Dict = "I was born in 92000, and this is falsé."
a : Any = tokenizer.convert_ids_to_tokens(tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase))
a : Union[str, Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase))
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase)
a : Any = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase)
a : Dict = rust_tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase)
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase)
a : Optional[int] = self.get_rust_tokenizer()
a : List[str] = tokenizer.encode(__UpperCAmelCase)
a : int = rust_tokenizer.encode(__UpperCAmelCase)
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase)
def __snake_case ( self : Dict):
a : Dict = "This is a test"
a : str = [13, 1, 4398, 25, 21, 1289]
a : Optional[int] = ["▁", "T", "his", "▁is", "▁a", "▁test"]
a : List[str] = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]
a : str = DebertaVaTokenizer(__UpperCAmelCase , keep_accents=__UpperCAmelCase)
a : int = DebertaVaTokenizerFast(__UpperCAmelCase , keep_accents=__UpperCAmelCase)
a : Tuple = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase)
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase)
a : List[Any] = tokenizer.tokenize(__UpperCAmelCase)
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase)
a : Dict = tokenizer.convert_ids_to_tokens(__UpperCAmelCase)
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase)
a : str = rust_tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase)
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase)
a : Any = rust_tokenizer.tokenize(__UpperCAmelCase)
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase)
a : int = rust_tokenizer.convert_ids_to_tokens(__UpperCAmelCase)
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase)
# fmt: off
a : Dict = "I was born in 92000, and this is falsé."
a : str = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
a : Tuple = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
a : List[str] = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
# fmt: on
a : Optional[Any] = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase)
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase)
a : Dict = tokenizer.tokenize(__UpperCAmelCase)
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase)
a : Tuple = tokenizer.convert_ids_to_tokens(__UpperCAmelCase)
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase)
a : Optional[int] = rust_tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase)
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase)
a : List[str] = rust_tokenizer.tokenize(__UpperCAmelCase)
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase)
a : Dict = rust_tokenizer.convert_ids_to_tokens(__UpperCAmelCase)
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase)
def __snake_case ( self : List[str]):
a : Dict = DebertaVaTokenizer(__UpperCAmelCase)
a : Union[str, Any] = tokenizer.encode("sequence builders")
a : Any = tokenizer.encode("multi-sequence build")
a : str = tokenizer.build_inputs_with_special_tokens(__UpperCAmelCase)
a : Any = tokenizer.build_inputs_with_special_tokens(__UpperCAmelCase , __UpperCAmelCase)
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , __UpperCAmelCase)
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , __UpperCAmelCase , )
@slow
def __snake_case ( self : str):
# fmt: off
a : int = {"input_ids": [[1, 39867, 36, 19390, 486, 27, 35052, 81436, 18, 60685, 1225, 7, 35052, 81436, 18, 9367, 16899, 18, 15937, 53, 594, 773, 18, 16287, 30465, 36, 15937, 6, 41139, 38, 36979, 60763, 191, 6, 34132, 99, 6, 50538, 390, 43230, 6, 34132, 2779, 20850, 14, 699, 1072, 1194, 36, 382, 10901, 53, 7, 699, 1072, 2084, 36, 20422, 630, 53, 19, 105, 3049, 1896, 1053, 16899, 1506, 11, 37978, 4243, 7, 1237, 31869, 200, 16566, 654, 6, 35052, 81436, 7, 55630, 13593, 4, 2], [1, 26, 15011, 13, 667, 8, 1053, 18, 23611, 1237, 72356, 12820, 34, 104134, 1209, 35, 13313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 15785, 14951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__UpperCAmelCase , model_name="microsoft/deberta-v2-xlarge" , revision="ad6e42c1532ddf3a15c39246b63f5559d558b670" , )
| 371 |
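Most assertions above check that the slow (SentencePiece) and fast (Rust) tokenizers agree on the same input. A quick hedged spot-check of that property; it assumes `sentencepiece` is installed and Hub access, and the checkpoint name is only an example:

```python
from transformers import AutoTokenizer

slow = AutoTokenizer.from_pretrained("microsoft/deberta-v3-small", use_fast=False)
fast = AutoTokenizer.from_pretrained("microsoft/deberta-v3-small", use_fast=True)

text = "I was born in 92000, and this is falsé."
print(slow.tokenize(text) == fast.tokenize(text))  # expected: True
print(slow.encode(text) == fast.encode(text))      # expected: True
```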
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
__lowercase = logging.get_logger(__name__)
@add_end_docstrings(_a )
class _A ( _a ):
"""simple docstring"""
def __init__( self : List[Any] , **__UpperCAmelCase : List[Any]):
super().__init__(**__UpperCAmelCase)
if self.framework == "tf":
raise ValueError(f'''The {self.__class__} is only available in PyTorch.''')
requires_backends(self , "vision")
self.check_model_type(__UpperCAmelCase)
def __call__( self : str , __UpperCAmelCase : Union[str, "Image.Image", List[Dict[str, Any]]] , __UpperCAmelCase : Union[str, List[str]] = None , **__UpperCAmelCase : List[Any] , ):
if "text_queries" in kwargs:
a : List[Any] = kwargs.pop("text_queries")
if isinstance(__UpperCAmelCase , (str, Image.Image)):
a : Any = {"image": image, "candidate_labels": candidate_labels}
else:
a : Optional[int] = image
a : Optional[int] = super().__call__(__UpperCAmelCase , **__UpperCAmelCase)
return results
def __snake_case ( self : Optional[int] , **__UpperCAmelCase : List[Any]):
a : str = {}
if "threshold" in kwargs:
a : Dict = kwargs["threshold"]
if "top_k" in kwargs:
a : str = kwargs["top_k"]
return {}, {}, postprocess_params
def __snake_case ( self : List[Any] , __UpperCAmelCase : Optional[Any]):
a : Union[str, Any] = load_image(inputs["image"])
a : Any = inputs["candidate_labels"]
if isinstance(__UpperCAmelCase , __UpperCAmelCase):
a : Optional[Any] = candidate_labels.split(",")
a : Union[str, Any] = torch.tensor([[image.height, image.width]] , dtype=torch.intaa)
for i, candidate_label in enumerate(__UpperCAmelCase):
a : int = self.tokenizer(__UpperCAmelCase , return_tensors=self.framework)
a : int = self.image_processor(__UpperCAmelCase , return_tensors=self.framework)
yield {
"is_last": i == len(__UpperCAmelCase) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def __snake_case ( self : Dict , __UpperCAmelCase : Optional[int]):
a : List[Any] = model_inputs.pop("target_size")
a : Optional[int] = model_inputs.pop("candidate_label")
a : List[Any] = model_inputs.pop("is_last")
a : List[Any] = self.model(**__UpperCAmelCase)
a : Union[str, Any] = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
return model_outputs
def __snake_case ( self : str , __UpperCAmelCase : Dict , __UpperCAmelCase : Optional[int]=0.1 , __UpperCAmelCase : List[str]=None):
a : Dict = []
for model_output in model_outputs:
a : int = model_output["candidate_label"]
a : Any = BaseModelOutput(__UpperCAmelCase)
a : Optional[Any] = self.image_processor.post_process_object_detection(
outputs=__UpperCAmelCase , threshold=__UpperCAmelCase , target_sizes=model_output["target_size"])[0]
for index in outputs["scores"].nonzero():
a : Any = outputs["scores"][index].item()
a : str = self._get_bounding_box(outputs["boxes"][index][0])
a : Optional[Any] = {"score": score, "label": label, "box": box}
results.append(__UpperCAmelCase)
a : str = sorted(__UpperCAmelCase , key=lambda x: x["score"] , reverse=__UpperCAmelCase)
if top_k:
a : Union[str, Any] = results[:top_k]
return results
def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : "torch.Tensor"):
if self.framework != "pt":
raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
a , a , a , a : List[Any] = box.int().tolist()
a : str = {
"xmin": xmin,
"ymin": ymin,
"xmax": xmax,
"ymax": ymax,
}
return bbox
| 226 | 0 |
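A hedged usage sketch for the pipeline class above. OWL-ViT is one checkpoint commonly paired with this task; the call downloads weights and the image, so treat the names as examples rather than requirements:

```python
from transformers import pipeline

detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
results = detector(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["cat", "remote control"],
    threshold=0.1,
)
for result in results:
    # Each entry carries a score, a label, and an {xmin, ymin, xmax, ymax} box.
    print(result["label"], round(result["score"], 3), result["box"])
```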
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class lowercase__ ( unittest.TestCase ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=4 , )-> List[Any]:
'''simple docstring'''
lowerCAmelCase__ = parent
lowerCAmelCase__ = batch_size
lowerCAmelCase__ = seq_length
lowerCAmelCase__ = is_training
lowerCAmelCase__ = use_attention_mask
lowerCAmelCase__ = use_token_type_ids
lowerCAmelCase__ = use_labels
lowerCAmelCase__ = vocab_size
lowerCAmelCase__ = hidden_size
lowerCAmelCase__ = num_hidden_layers
lowerCAmelCase__ = num_attention_heads
lowerCAmelCase__ = intermediate_size
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = hidden_dropout_prob
lowerCAmelCase__ = attention_probs_dropout_prob
lowerCAmelCase__ = max_position_embeddings
lowerCAmelCase__ = type_vocab_size
lowerCAmelCase__ = type_sequence_label_size
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = num_choices
def UpperCAmelCase ( self )-> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ = None
if self.use_attention_mask:
lowerCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase__ = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=_UpperCAmelCase , )
return config, input_ids, attention_mask
def UpperCAmelCase ( self )-> Dict:
'''simple docstring'''
lowerCAmelCase__ = self.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = config_and_inputs
lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_flax
class lowercase__ ( lowerCAmelCase__, unittest.TestCase ):
a_ =(
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
)
if is_flax_available()
else ()
)
def UpperCAmelCase ( self )-> str:
'''simple docstring'''
lowerCAmelCase__ = FlaxDistilBertModelTester(self )
@slow
def UpperCAmelCase ( self )-> Optional[Any]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
lowerCAmelCase__ = model_class_name.from_pretrained("distilbert-base-uncased" )
lowerCAmelCase__ = model(np.ones((1, 1) ) )
self.assertIsNotNone(_UpperCAmelCase )
@require_flax
class lowercase__ ( unittest.TestCase ):
@slow
def UpperCAmelCase ( self )-> List[Any]:
'''simple docstring'''
lowerCAmelCase__ = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased" )
lowerCAmelCase__ = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
lowerCAmelCase__ = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
lowerCAmelCase__ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )[0]
lowerCAmelCase__ = (1, 11, 768)
self.assertEqual(output.shape , _UpperCAmelCase )
lowerCAmelCase__ = np.array([[[-0.1_639, 0.3_299, 0.1_648], [-0.1_746, 0.3_289, 0.1_710], [-0.1_884, 0.3_357, 0.1_810]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , _UpperCAmelCase , atol=1E-4 ) )
| 340 |
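The tester above builds tiny random configs rather than loading checkpoints. The same trick in isolation; this sketch assumes `jax` and `flax` are installed and downloads nothing:

```python
import numpy as np
from transformers import DistilBertConfig, FlaxDistilBertModel

config = DistilBertConfig(vocab_size=100, dim=32, n_layers=2, n_heads=4, hidden_dim=64)
model = FlaxDistilBertModel(config)  # randomly initialized, deterministic seed

input_ids = np.ones((1, 8), dtype="i4")
outputs = model(input_ids, attention_mask=np.ones_like(input_ids))
print(outputs.last_hidden_state.shape)  # (1, 8, 32)
```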
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[int] ) -> Union[str, Any]:
__lowercase = [2, 2, 6, 2] if 'tiny' in model_name else [2, 2, 18, 2]
__lowercase = True if 'large' in model_name or 'huge' in model_name else False
__lowercase = True if 'large' in model_name or 'huge' in model_name else False
__lowercase = True if 'large' in model_name or 'huge' in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
__lowercase = [3, 3, 3, 3]
__lowercase = [5, 5, 5, 5]
elif "fl4" in model_name:
__lowercase = [4, 4, 4, 4]
__lowercase = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
__lowercase = [3, 3, 3, 3]
if "lrf" in model_name:
__lowercase = [3, 3, 3, 3]
else:
__lowercase = [2, 2, 2, 2]
if "tiny" in model_name:
__lowercase = 96
elif "small" in model_name:
__lowercase = 96
elif "base" in model_name:
__lowercase = 128
elif "large" in model_name:
__lowercase = 192
elif "xlarge" in model_name:
__lowercase = 256
elif "huge" in model_name:
__lowercase = 352
# set label information
__lowercase = 'huggingface/label-files'
if "large" in model_name or "huge" in model_name:
__lowercase = 'imagenet-22k-id2label.json'
else:
__lowercase = 'imagenet-1k-id2label.json'
__lowercase = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) )
__lowercase = {int(SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
__lowercase = {v: k for k, v in idalabel.items()}
__lowercase = FocalNetConfig(
embed_dim=SCREAMING_SNAKE_CASE , depths=SCREAMING_SNAKE_CASE , focal_levels=SCREAMING_SNAKE_CASE , focal_windows=SCREAMING_SNAKE_CASE , use_conv_embed=SCREAMING_SNAKE_CASE , idalabel=SCREAMING_SNAKE_CASE , labelaid=SCREAMING_SNAKE_CASE , use_post_layernorm=SCREAMING_SNAKE_CASE , use_layerscale=SCREAMING_SNAKE_CASE , )
return config
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Dict ) -> Dict:
if "patch_embed.proj" in name:
__lowercase = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
__lowercase = name.replace('patch_embed.norm' , 'embeddings.norm' )
if "layers" in name:
__lowercase = 'encoder.' + name
if "encoder.layers" in name:
__lowercase = name.replace('encoder.layers' , 'encoder.stages' )
if "downsample.proj" in name:
__lowercase = name.replace('downsample.proj' , 'downsample.projection' )
if "blocks" in name:
__lowercase = name.replace('blocks' , 'layers' )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
__lowercase = name.replace('modulation.f' , 'modulation.projection_in' )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
__lowercase = name.replace('modulation.h' , 'modulation.projection_context' )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
__lowercase = name.replace('modulation.proj' , 'modulation.projection_out' )
if name == "norm.weight":
__lowercase = 'layernorm.weight'
if name == "norm.bias":
__lowercase = 'layernorm.bias'
if "head" in name:
__lowercase = name.replace('head' , 'classifier' )
else:
__lowercase = 'focalnet.' + name
return name
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Optional[Any]=False ) -> List[str]:
# fmt: off
__lowercase = {
'focalnet-tiny': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth',
'focalnet-tiny-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth',
'focalnet-small': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth',
'focalnet-small-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth',
'focalnet-base': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth',
'focalnet-base-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth',
'focalnet-large-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth',
'focalnet-large-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth',
'focalnet-xlarge-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth',
'focalnet-xlarge-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth',
}
# fmt: on
__lowercase = model_name_to_url[model_name]
print('Checkpoint URL: ' , SCREAMING_SNAKE_CASE )
__lowercase = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE , map_location='cpu' )['model']
# rename keys
for key in state_dict.copy().keys():
__lowercase = state_dict.pop(SCREAMING_SNAKE_CASE )
__lowercase = val
__lowercase = get_focalnet_config(SCREAMING_SNAKE_CASE )
__lowercase = FocalNetForImageClassification(SCREAMING_SNAKE_CASE )
model.eval()
# load state dict
model.load_state_dict(SCREAMING_SNAKE_CASE )
# verify conversion
__lowercase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
__lowercase = BitImageProcessor(
do_resize=SCREAMING_SNAKE_CASE , size={'shortest_edge': 256} , resample=PILImageResampling.BILINEAR , do_center_crop=SCREAMING_SNAKE_CASE , crop_size=224 , do_normalize=SCREAMING_SNAKE_CASE , image_mean=SCREAMING_SNAKE_CASE , image_std=SCREAMING_SNAKE_CASE , )
__lowercase = Image.open(requests.get(SCREAMING_SNAKE_CASE , stream=SCREAMING_SNAKE_CASE ).raw )
__lowercase = processor(images=SCREAMING_SNAKE_CASE , return_tensors='pt' )
__lowercase = transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
] )
__lowercase = image_transforms(SCREAMING_SNAKE_CASE ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values , SCREAMING_SNAKE_CASE , atol=1E-4 )
__lowercase = model(**SCREAMING_SNAKE_CASE )
__lowercase = outputs.logits.argmax(-1 ).item()
print('Predicted class:' , model.config.idalabel[predicted_class_idx] )
print('First values of logits:' , outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
__lowercase = torch.tensor([0.2_166, -0.4_368, 0.2_191] )
elif model_name == "focalnet-tiny-lrf":
__lowercase = torch.tensor([1.1_669, 0.0_125, -0.1_695] )
elif model_name == "focalnet-small":
__lowercase = torch.tensor([0.4_917, -0.0_430, 0.1_341] )
elif model_name == "focalnet-small-lrf":
__lowercase = torch.tensor([-0.2_588, -0.5_342, -0.2_331] )
elif model_name == "focalnet-base":
__lowercase = torch.tensor([-0.1_655, -0.4_090, -0.1_730] )
elif model_name == "focalnet-base-lrf":
__lowercase = torch.tensor([0.5_306, -0.0_483, -0.3_928] )
assert torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(F"""Saving model and processor of {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(SCREAMING_SNAKE_CASE )
processor.save_pretrained(SCREAMING_SNAKE_CASE )
if push_to_hub:
print(F"""Pushing model and processor of {model_name} to the hub...""" )
model.push_to_hub(F"""{model_name}""" )
processor.push_to_hub(F"""{model_name}""" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""focalnet-tiny""",
type=str,
help="""Name of the FocalNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub.""",
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 325 | 0 |
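Before trusting the converted logits, the script checks that the image processor reproduces a plain torchvision pipeline. That reference pipeline in isolation, on a synthetic image (the mean/std values are the standard ImageNet constants):

```python
import numpy as np
from PIL import Image
from torchvision import transforms

image = Image.fromarray(np.random.randint(0, 256, (300, 400, 3), dtype=np.uint8))

reference_pipeline = transforms.Compose(
    [
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ]
)
pixel_values = reference_pipeline(image).unsqueeze(0)  # add a batch dimension
print(pixel_values.shape)  # torch.Size([1, 3, 224, 224])
```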
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings('ignore', category=UserWarning, module='torch.optim.lr_scheduler')
class A_ :
def __init__(self :Any , _UpperCamelCase :str , _UpperCamelCase :List[str] , _UpperCamelCase :bool = True , _UpperCamelCase :bool = False )-> List[str]:
__A = scheduler
__A = optimizers if isinstance(_UpperCamelCase , (list, tuple) ) else [optimizers]
__A = split_batches
__A = step_with_optimizer
__A = GradientState()
def _lowerCAmelCase (self :Tuple , *_UpperCamelCase :Tuple , **_UpperCamelCase :int )-> Optional[Any]:
if not self.step_with_optimizer:
# No link between scheduler and optimizer -> just step
self.scheduler.step(*_UpperCamelCase , **_UpperCamelCase )
return
# Otherwise, first make sure the optimizer was stepped.
if not self.gradient_state.sync_gradients:
if self.gradient_state.adjust_scheduler:
self.scheduler._step_count += 1
return
for opt in self.optimizers:
if opt.step_was_skipped:
return
if self.split_batches:
# Split batches -> the training dataloader batch size is not changed so one step per training step
self.scheduler.step(*_UpperCamelCase , **_UpperCamelCase )
else:
# Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
# num_processes steps per training step
__A = AcceleratorState().num_processes
for _ in range(_UpperCamelCase ):
# Special case when using OneCycle and `drop_last` was not used
if hasattr(self.scheduler , '''total_steps''' ):
if self.scheduler._step_count <= self.scheduler.total_steps:
self.scheduler.step(*_UpperCamelCase , **_UpperCamelCase )
else:
self.scheduler.step(*_UpperCamelCase , **_UpperCamelCase )
def _lowerCAmelCase (self :List[Any] )-> Union[str, Any]:
return self.scheduler.get_last_lr()
def _lowerCAmelCase (self :Optional[int] )-> Any:
return self.scheduler.state_dict()
def _lowerCAmelCase (self :str , _UpperCamelCase :int )-> int:
self.scheduler.load_state_dict(_UpperCamelCase )
def _lowerCAmelCase (self :Any )-> Union[str, Any]:
return self.scheduler.get_lr()
def _lowerCAmelCase (self :List[str] , *_UpperCamelCase :Optional[Any] , **_UpperCamelCase :str )-> str:
return self.scheduler.print_lr(*_UpperCamelCase , **_UpperCamelCase )
| 250 |
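The point of the wrapper above: when batches are not split, N processes each consume a batch per step, so the wrapped scheduler must advance N times per optimizer step. A pure-PyTorch sketch of that bookkeeping, with no `accelerate` dependency:

```python
import torch

model = torch.nn.Linear(4, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.5)

num_processes = 4  # stand-in for AcceleratorState().num_processes
for _ in range(5):  # five training steps
    optimizer.zero_grad()
    model(torch.randn(2, 4)).sum().backward()
    optimizer.step()
    for _ in range(num_processes):  # advance once per (simulated) process
        scheduler.step()

print(scheduler.last_epoch)  # 20 == 5 steps * 4 processes
```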
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case__ : Tuple = logging.get_logger(__name__)
snake_case__ : int = {
'sayakpaul/vit-msn-base': 'https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class A_ ( _lowerCamelCase ):
lowerCAmelCase__ = """vit_msn"""
def __init__(self :str , _UpperCamelCase :List[Any]=768 , _UpperCamelCase :Optional[int]=12 , _UpperCamelCase :Any=12 , _UpperCamelCase :Dict=3072 , _UpperCamelCase :str="gelu" , _UpperCamelCase :str=0.0 , _UpperCamelCase :Union[str, Any]=0.0 , _UpperCamelCase :Optional[int]=0.0_2 , _UpperCamelCase :Any=1e-06 , _UpperCamelCase :Any=224 , _UpperCamelCase :Optional[Any]=16 , _UpperCamelCase :Any=3 , _UpperCamelCase :str=True , **_UpperCamelCase :Dict , )-> Union[str, Any]:
super().__init__(**_UpperCamelCase )
__A = hidden_size
__A = num_hidden_layers
__A = num_attention_heads
__A = intermediate_size
__A = hidden_act
__A = hidden_dropout_prob
__A = attention_probs_dropout_prob
__A = initializer_range
__A = layer_norm_eps
__A = image_size
__A = patch_size
__A = num_channels
__A = qkv_bias
| 250 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
'''MIT/ast-finetuned-audioset-10-10-0.4593''': (
'''https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'''
),
}
class UpperCamelCase_ (__A ):
__magic_name__ = '''audio-spectrogram-transformer'''
def __init__( self : Tuple , lowerCAmelCase_ : Tuple=768 , lowerCAmelCase_ : str=12 , lowerCAmelCase_ : int=12 , lowerCAmelCase_ : Optional[int]=3_072 , lowerCAmelCase_ : Optional[Any]="gelu" , lowerCAmelCase_ : Dict=0.0 , lowerCAmelCase_ : str=0.0 , lowerCAmelCase_ : Any=0.0_2 , lowerCAmelCase_ : Optional[Any]=1e-12 , lowerCAmelCase_ : Optional[Any]=16 , lowerCAmelCase_ : str=True , lowerCAmelCase_ : Dict=10 , lowerCAmelCase_ : Tuple=10 , lowerCAmelCase_ : Optional[Any]=1_024 , lowerCAmelCase_ : str=128 , **lowerCAmelCase_ : str , ) -> List[str]:
super().__init__(**lowerCAmelCase_ )
UpperCAmelCase_ : List[str] = hidden_size
UpperCAmelCase_ : Union[str, Any] = num_hidden_layers
UpperCAmelCase_ : Tuple = num_attention_heads
UpperCAmelCase_ : str = intermediate_size
UpperCAmelCase_ : Dict = hidden_act
UpperCAmelCase_ : Dict = hidden_dropout_prob
UpperCAmelCase_ : Optional[int] = attention_probs_dropout_prob
UpperCAmelCase_ : str = initializer_range
UpperCAmelCase_ : Tuple = layer_norm_eps
UpperCAmelCase_ : List[Any] = patch_size
UpperCAmelCase_ : int = qkv_bias
UpperCAmelCase_ : Any = frequency_stride
UpperCAmelCase_ : Tuple = time_stride
UpperCAmelCase_ : str = max_length
UpperCAmelCase_ : int = num_mel_bins
| 268 |
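The two config classes above (ViT-MSN and AST) follow one recipe: subclass `PretrainedConfig`, set `model_type`, store every hyperparameter as an attribute, and forward the rest to `super().__init__`. A minimal sketch of that recipe with a toy config:

```python
from transformers import PretrainedConfig

class ToyConfig(PretrainedConfig):
    model_type = "toy"  # registry key; "toy" is a placeholder

    def __init__(self, hidden_size: int = 64, num_layers: int = 2, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_layers = num_layers

config = ToyConfig(hidden_size=128)
print(config.to_json_string())  # serializes custom and inherited fields
```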
"""simple docstring"""
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class UpperCamelCase_ :
def __init__( self : str ) -> Dict:
UpperCAmelCase_ : List[Any] = ""
UpperCAmelCase_ : int = ""
UpperCAmelCase_ : Dict = []
UpperCAmelCase_ : int = 0
UpperCAmelCase_ : List[Any] = 256
UpperCAmelCase_ : Dict = 0
UpperCAmelCase_ : List[Any] = 0
UpperCAmelCase_ : str = 0
UpperCAmelCase_ : List[str] = 0
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : Dict ) -> Optional[Any]:
UpperCAmelCase_ : Dict = cva.imread(lowerCAmelCase_ , 0 )
UpperCAmelCase_ : Union[str, Any] = copy.deepcopy(self.img )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = plt.hist(self.img.ravel() , 256 , [0, 256] , label="x" )
UpperCAmelCase_ : List[Any] = np.sum(lowerCAmelCase_ )
for i in range(len(lowerCAmelCase_ ) ):
UpperCAmelCase_ : List[Any] = x[i] / self.k
self.sk += prk
UpperCAmelCase_ : Optional[Any] = (self.L - 1) * self.sk
if self.rem != 0:
UpperCAmelCase_ : Any = int(last % last )
UpperCAmelCase_ : List[str] = int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(lowerCAmelCase_ )
UpperCAmelCase_ : Optional[Any] = int(np.ma.count(self.img ) / self.img[1].size )
UpperCAmelCase_ : Dict = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
UpperCAmelCase_ : Any = self.img[j][i]
if num != self.last_list[num]:
UpperCAmelCase_ : Tuple = self.last_list[num]
cva.imwrite("output_data/output.jpg" , self.img )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]:
plt.hist(self.img.ravel() , 256 , [0, 256] )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
cva.imshow("Output-Image" , self.img )
cva.imshow("Input-Image" , self.original_image )
cva.waitKey(5_000 )
cva.destroyAllWindows()
if __name__ == "__main__":
lowerCamelCase_ = os.path.join(os.path.basename(__file__), '''image_data/input.jpg''')
lowerCamelCase_ = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 268 | 1 |
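The class above is classical histogram equalization: map each gray level g to round((L - 1) * CDF(g)). The same transform in a few lines of NumPy, without OpenCV or file I/O:

```python
import numpy as np

def equalize_histogram(img: np.ndarray, levels: int = 256) -> np.ndarray:
    hist = np.bincount(img.ravel(), minlength=levels)      # gray-level histogram
    cdf = np.cumsum(hist) / img.size                       # cumulative distribution
    lookup = np.round((levels - 1) * cdf).astype(np.uint8)
    return lookup[img]                                     # remap every pixel

rng = np.random.default_rng(0)
dark = rng.integers(0, 64, size=(32, 32), dtype=np.uint8)  # low-contrast input
print(dark.max(), equalize_histogram(dark).max())  # the output reaches 255
```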
"""simple docstring"""
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
UpperCAmelCase = """\
@inproceedings{wang2019glue,
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
note={In the Proceedings of ICLR.},
year={2019}
}
"""
UpperCAmelCase = """\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
"""
UpperCAmelCase = """
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
predictions: list of predictions to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
Returns: depending on the GLUE subset, one or several of:
\"accuracy\": Accuracy
\"f1\": F1 score
\"pearson\": Pearson Correlation
\"spearmanr\": Spearman Correlation
\"matthews_correlation\": Matthew Correlation
Examples:
>>> glue_metric = datasets.load_metric('glue', 'sst2') # 'sst2' or any of [\"mnli\", \"mnli_mismatched\", \"mnli_matched\", \"qnli\", \"rte\", \"wnli\", \"hans\"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> glue_metric = datasets.load_metric('glue', 'mrpc') # 'mrpc' or 'qqp'
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> glue_metric = datasets.load_metric('glue', 'stsb')
>>> references = [0., 1., 2., 3., 4., 5.]
>>> predictions = [0., 1., 2., 3., 4., 5.]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print({\"pearson\": round(results[\"pearson\"], 2), \"spearmanr\": round(results[\"spearmanr\"], 2)})
{'pearson': 1.0, 'spearmanr': 1.0}
>>> glue_metric = datasets.load_metric('glue', 'cola')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'matthews_correlation': 1.0}
"""
def lowercase ( a__ : int , a__ : Tuple ) -> Optional[Any]:
return float((preds == labels).mean() )
def lowercase ( a__ : Optional[Any] , a__ : int ) -> Optional[int]:
_UpperCamelCase = simple_accuracy(a__ , a__ )
_UpperCamelCase = float(fa_score(y_true=a__ , y_pred=a__ ) )
return {
"accuracy": acc,
"f1": fa,
}
def lowercase ( a__ : Any , a__ : Union[str, Any] ) -> Any:
_UpperCamelCase = float(pearsonr(a__ , a__ )[0] )
_UpperCamelCase = float(spearmanr(a__ , a__ )[0] )
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class UpperCAmelCase_ ( datasets.Metric):
def _UpperCamelCase ( self : Optional[int] ) -> Optional[int]:
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["sst2", "mnli", "mnli_mismatched", "mnli_matched", '''
'''"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ),
'''references''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ),
} ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' , )
def _UpperCamelCase ( self : int , __UpperCamelCase : int , __UpperCamelCase : List[Any] ) -> Any:
if self.config_name == "cola":
return {"matthews_correlation": matthews_corrcoef(__UpperCamelCase , __UpperCamelCase )}
elif self.config_name == "stsb":
return pearson_and_spearman(__UpperCamelCase , __UpperCamelCase )
elif self.config_name in ["mrpc", "qqp"]:
return acc_and_fa(__UpperCamelCase , __UpperCamelCase )
elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
return {"accuracy": simple_accuracy(__UpperCamelCase , __UpperCamelCase )}
else:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["sst2", "mnli", "mnli_mismatched", "mnli_matched", '''
'''"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]''' )
| 54 |
"""simple docstring"""
import qiskit
def lowercase ( a__ : int = 2 ) -> qiskit.result.counts.Counts:
_UpperCamelCase = qubits
# Using Aer's simulator
_UpperCamelCase = qiskit.Aer.get_backend('''aer_simulator''' )
# Creating a Quantum Circuit acting on the q register
_UpperCamelCase = qiskit.QuantumCircuit(a__ , a__ )
# Adding a H gate on qubit 0 (now q0 in superposition)
circuit.h(0 )
for i in range(1 , a__ ):
# Adding CX (CNOT) gate
circuit.cx(i - 1 , i )
# Mapping the quantum measurement to the classical bits
circuit.measure(list(range(a__ ) ) , list(range(a__ ) ) )
# Now measuring any one qubit would affect other qubits to collapse
# their super position and have same state as the measured one.
# Executing the circuit on the simulator
_UpperCamelCase = qiskit.execute(a__ , a__ , shots=1000 )
return job.result().get_counts(a__ )
if __name__ == "__main__":
print(F'''Total count for various states are: {quantum_entanglement(3)}''')
| 54 | 1 |
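The circuit above prepares an n-qubit GHZ state, so measuring should yield only the all-zeros and all-ones bitstrings. A variant targeting the newer `qiskit-aer` entry point; qiskit's simulator API has moved between releases, so the import below is an assumption about the installed version:

```python
import qiskit
from qiskit_aer import AerSimulator  # assumes the qiskit-aer package is installed

n = 3
circuit = qiskit.QuantumCircuit(n, n)
circuit.h(0)              # put qubit 0 into superposition
for i in range(1, n):
    circuit.cx(i - 1, i)  # chain CNOTs to entangle every qubit
circuit.measure(range(n), range(n))

counts = AerSimulator().run(circuit, shots=1000).result().get_counts()
print(counts)  # only '000' and '111' appear, roughly 50/50
```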
'''simple docstring'''
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
lowercase : Dict = ["small", "medium", "large"]
lowercase : Any = "lm_head.decoder.weight"
lowercase : Optional[Any] = "lm_head.weight"
def SCREAMING_SNAKE_CASE__ ( __A , __A ) -> List[str]:
_snake_case = torch.load(__A )
_snake_case = d.pop(__A )
os.makedirs(__A , exist_ok=__A )
torch.save(__A , os.path.join(__A , __A ) )
if __name__ == "__main__":
lowercase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("--dialogpt_path", default=".", type=str)
lowercase : int = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
lowercase : List[Any] = os.path.join(args.dialogpt_path, F'''{MODEL}_ft.pkl''')
lowercase : Union[str, Any] = F'''./DialoGPT-{MODEL}'''
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
| 42 |
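The conversion boils down to popping one key out of a state dict and re-saving it under the new name. A self-contained dry run on a toy state dict, using a temp directory instead of the DialoGPT pickles:

```python
import os
import tempfile

import torch

OLD_KEY, NEW_KEY = "lm_head.decoder.weight", "lm_head.weight"

state_dict = {OLD_KEY: torch.zeros(4, 4), "transformer.wte.weight": torch.zeros(4, 4)}
state_dict[NEW_KEY] = state_dict.pop(OLD_KEY)  # rename in place

with tempfile.TemporaryDirectory() as folder:
    path = os.path.join(folder, "pytorch_model.bin")
    torch.save(state_dict, path)
    reloaded = torch.load(path)
    print(NEW_KEY in reloaded, OLD_KEY in reloaded)  # True False
```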
'''simple docstring'''
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
lowercase : Dict = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
lowercase : Optional[int] = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
lowercase : Optional[Any] = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def SCREAMING_SNAKE_CASE__ ( __A , __A ) -> tuple[str, float]:
_snake_case = len([g for position, g in enumerate(__A ) if g == main_target[position]] )
return (item, float(__A ))
def SCREAMING_SNAKE_CASE__ ( __A , __A ) -> tuple[str, str]:
_snake_case = random.randint(0 , len(__A ) - 1 )
_snake_case = parent_a[:random_slice] + parent_a[random_slice:]
_snake_case = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
def SCREAMING_SNAKE_CASE__ ( __A , __A ) -> str:
_snake_case = list(__A )
if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
_snake_case = random.choice(__A )
return "".join(__A )
def SCREAMING_SNAKE_CASE__ ( __A , __A , __A , ) -> list[str]:
_snake_case = []
# Generate more children proportionally to the fitness score.
_snake_case = int(parent_a[1] * 100 ) + 1
_snake_case = 10 if child_n >= 10 else child_n
for _ in range(__A ):
_snake_case = population_score[random.randint(0 , __A )][0]
_snake_case , _snake_case = crossover(parent_a[0] , __A )
# Append new string to the population list.
pop.append(mutate(__A , __A ) )
pop.append(mutate(__A , __A ) )
return pop
def SCREAMING_SNAKE_CASE__ ( __A , __A , __A = True ) -> tuple[int, int, str]:
# Verify if N_POPULATION is bigger than N_SELECTED
if N_POPULATION < N_SELECTED:
_snake_case = F'{N_POPULATION} must be bigger than {N_SELECTED}'
raise ValueError(__A )
# Verify that the target contains no genes besides the ones inside genes variable.
_snake_case = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
_snake_case = F'{not_in_genes_list} is not in genes list, evolution cannot converge'
raise ValueError(__A )
# Generate random starting population.
_snake_case = []
for _ in range(__A ):
population.append(''.join([random.choice(__A ) for i in range(len(__A ) )] ) )
# Just some logs to know what the algorithms is doing.
_snake_case , _snake_case = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(__A )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
_snake_case = [evaluate(__A , __A ) for item in population]
# Check if there is a matching evolution.
_snake_case = sorted(__A , key=lambda x: x[1] , reverse=__A )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generations.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
F'\nGeneration: {generation}'
F'\nTotal Population:{total_population}'
F'\nBest score: {population_score[0][1]}'
F'\nBest string: {population_score[0][0]}' )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
_snake_case = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(__A )
# Normalize population score to be between 0 and 1.
_snake_case = [
(item, score / len(__A )) for item, score in population_score
]
# This is selection
for i in range(__A ):
population.extend(select(population_score[int(__A )] , __A , __A ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
# far fewer generations.
if len(__A ) > N_POPULATION:
break
if __name__ == "__main__":
lowercase : str = (
"This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
)
lowercase : str = list(
" ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
"nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
)
lowercase , lowercase , lowercase : Tuple = basic(target_str, genes_list)
print(
F'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'''
)
| 42 | 1 |
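The evolution loop is easiest to see on a short target. The sketch below re-implements the three helpers (evaluate, crossover, mutate) with plain names and runs a compact elitist loop; the constants are illustrative, not the module's tuned values.

```python
import random

TARGET = "hello"
GENES = "abcdefghijklmnopqrstuvwxyz"

def evaluate(item: str) -> int:
    # fitness = number of characters already in the right position
    return sum(1 for pos, gene in enumerate(item) if gene == TARGET[pos])

def crossover(parent_a: str, parent_b: str) -> str:
    cut = random.randint(0, len(parent_a) - 1)
    return parent_a[:cut] + parent_b[cut:]

def mutate(item: str, probability: float = 0.4) -> str:
    chars = list(item)
    if random.uniform(0, 1) < probability:
        chars[random.randint(0, len(chars) - 1)] = random.choice(GENES)
    return "".join(chars)

random.seed(0)
population = ["".join(random.choice(GENES) for _ in TARGET) for _ in range(200)]
for generation in range(1, 10_001):
    population.sort(key=evaluate, reverse=True)
    if population[0] == TARGET:
        print(f"matched {TARGET!r} in generation {generation}")
        break
    parents = population[:50]  # keep the elite, refill with mutated crossovers
    population = parents + [
        mutate(crossover(*random.sample(parents, 2))) for _ in range(150)
    ]
```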
"""simple docstring"""
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class a :
def __init__( self , _snake_case , _snake_case=13 , _snake_case=30 , _snake_case=2 , _snake_case=3 , _snake_case=True , _snake_case=True , _snake_case=32 , _snake_case=5 , _snake_case=4 , _snake_case=37 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=10 , _snake_case=0.02 , _snake_case=None , ):
"""simple docstring"""
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = image_size
lowerCAmelCase = patch_size
lowerCAmelCase = num_channels
lowerCAmelCase = is_training
lowerCAmelCase = use_labels
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = type_sequence_label_size
lowerCAmelCase = initializer_range
lowerCAmelCase = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
lowerCAmelCase = (image_size // patch_size) ** 2
lowerCAmelCase = num_patches + 1
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase = self.get_config()
return config, pixel_values, labels
def UpperCamelCase__ ( self ):
"""simple docstring"""
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = ViTMSNModel(config=_snake_case )
model.to(_snake_case )
model.eval()
lowerCAmelCase = model(_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = self.type_sequence_label_size
lowerCAmelCase = ViTMSNForImageClassification(_snake_case )
model.to(_snake_case )
model.eval()
lowerCAmelCase = model(_snake_case , labels=_snake_case )
print(f'Pixel and labels shape: {pixel_values.shape}, {labels.shape}' )
print(f'Labels: {labels}' )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCAmelCase = 1
lowerCAmelCase = ViTMSNForImageClassification(_snake_case )
model.to(_snake_case )
model.eval()
lowerCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCAmelCase = model(_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.prepare_config_and_inputs()
lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = config_and_inputs
lowerCAmelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class a ( a__ , a__ , unittest.TestCase ):
snake_case__ = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
snake_case__ = (
{'''feature-extraction''': ViTMSNModel, '''image-classification''': ViTMSNForImageClassification}
if is_torch_available()
else {}
)
snake_case__ = False
snake_case__ = False
snake_case__ = False
snake_case__ = False
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = ViTMSNModelTester(self )
lowerCAmelCase = ConfigTester(self , config_class=_snake_case , has_text_modality=_snake_case , hidden_size=37 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMSN does not use inputs_embeds' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase = model_class(_snake_case )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_snake_case , nn.Linear ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase = model_class(_snake_case )
lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase = [*signature.parameters.keys()]
lowerCAmelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , _snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_snake_case )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase = ViTMSNModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def _SCREAMING_SNAKE_CASE ():
lowerCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class a ( unittest.TestCase ):
@cached_property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return ViTImageProcessor.from_pretrained('facebook/vit-msn-small' ) if is_vision_available() else None
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
torch.manual_seed(2 )
lowerCAmelCase = ViTMSNForImageClassification.from_pretrained('facebook/vit-msn-small' ).to(_snake_case )
lowerCAmelCase = self.default_image_processor
lowerCAmelCase = prepare_img()
lowerCAmelCase = image_processor(images=_snake_case , return_tensors='pt' ).to(_snake_case )
# forward pass
with torch.no_grad():
lowerCAmelCase = model(**_snake_case )
# verify the logits
lowerCAmelCase = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , _snake_case )
lowerCAmelCase = torch.tensor([-0.0_803, -0.4_454, -0.2_375] ).to(_snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _snake_case , atol=1E-4 ) )
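For reference, the integration test above boils down to this standalone inference sketch (checkpoint name and fixture path are taken verbatim from the test; downloading the checkpoint requires network access):
import torch
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNForImageClassification
processor = ViTImageProcessor.from_pretrained("facebook/vit-msn-small")
model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # expected shape: (1, 1000)
print(logits.shape)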
| 309 |
"""simple docstring"""
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
@dataclass
class a :
snake_case__ = field(metadata={'''help''': '''The name of the task to train on: ''' + ''', '''.join(glue_processors.keys() )} )
snake_case__ = field(
metadata={'''help''': '''The input data dir. Should contain the .tsv files (or other data files) for the task.'''} )
snake_case__ = field(
default=1_2_8 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
snake_case__ = field(
default=a__ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.task_name.lower()
class a ( a__ ):
snake_case__ = '''train'''
snake_case__ = '''dev'''
snake_case__ = '''test'''
class a ( a__ ):
snake_case__ = 42
snake_case__ = 42
snake_case__ = 42
def __init__( self , _snake_case , _snake_case , _snake_case = None , _snake_case = Split.train , _snake_case = None , ):
"""simple docstring"""
warnings.warn(
'This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets '
'library. You can have a look at this example script for pointers: '
'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py' , _snake_case , )
lowerCAmelCase = args
lowerCAmelCase = glue_processors[args.task_name]()
lowerCAmelCase = glue_output_modes[args.task_name]
if isinstance(_snake_case , _snake_case ):
try:
lowerCAmelCase = Split[mode]
except KeyError:
raise KeyError('mode is not a valid split name' )
# Load data features from cache or dataset file
lowerCAmelCase = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , F'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}' , )
lowerCAmelCase = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
lowerCAmelCase ,lowerCAmelCase = label_list[2], label_list[1]
lowerCAmelCase = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lowerCAmelCase = cached_features_file + '.lock'
with FileLock(_snake_case ):
if os.path.exists(_snake_case ) and not args.overwrite_cache:
lowerCAmelCase = time.time()
lowerCAmelCase = torch.load(_snake_case )
logger.info(
F'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start )
else:
logger.info(F'Creating features from dataset file at {args.data_dir}' )
if mode == Split.dev:
lowerCAmelCase = self.processor.get_dev_examples(args.data_dir )
elif mode == Split.test:
lowerCAmelCase = self.processor.get_test_examples(args.data_dir )
else:
lowerCAmelCase = self.processor.get_train_examples(args.data_dir )
if limit_length is not None:
lowerCAmelCase = examples[:limit_length]
lowerCAmelCase = glue_convert_examples_to_features(
_snake_case , _snake_case , max_length=args.max_seq_length , label_list=_snake_case , output_mode=self.output_mode , )
lowerCAmelCase = time.time()
torch.save(self.features , _snake_case )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self ):
"""simple docstring"""
return len(self.features )
def __getitem__( self , _snake_case ):
"""simple docstring"""
return self.features[_snake_case]
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self.label_list
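The FileLock idiom in the constructor above is worth isolating: one process builds the cache while the others block on the lock, then everyone reads the finished file. A minimal sketch of the pattern (path and build function are illustrative):
import os
import torch
from filelock import FileLock
def load_or_build(cache_path: str, build_fn, overwrite: bool = False):
    # Only one process enters the critical section at a time; late arrivals
    # find the cache already written and simply load it.
    with FileLock(cache_path + ".lock"):
        if os.path.exists(cache_path) and not overwrite:
            return torch.load(cache_path)
        features = build_fn()
        torch.save(features, cache_path)
        return features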
| 309 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCamelCase = {
'''configuration_poolformer''': [
'''POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''PoolFormerConfig''',
'''PoolFormerOnnxConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ['''PoolFormerFeatureExtractor''']
UpperCamelCase = ['''PoolFormerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'''POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PoolFormerForImageClassification''',
'''PoolFormerModel''',
'''PoolFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
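The `_LazyModule` call above (and in the other `__init__.py` snippets below) defers the heavy submodule imports until an exported name is first touched. A simplified sketch of the mechanism; the real implementation lives in `transformers.utils` and handles many more cases:
import importlib
from types import ModuleType
class LazyModule(ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # Map each exported attribute to the submodule that defines it.
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }
    def __getattr__(self, attr: str):
        # Import the defining submodule only on first access, then cache the
        # resolved attribute so later lookups skip __getattr__ entirely.
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)
        return value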
| 306 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCamelCase = {
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 306 | 1 |
'''simple docstring'''
from scipy.stats import spearmanr
import datasets
__snake_case = '''
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
'''
__snake_case = '''
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: only returned if `return_pvalue=True` is passed.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{\'spearmanr\': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results[\'spearmanr\'])
-0.7
>>> print(round(results[\'spearmanr_pvalue\'], 2))
0.19
'''
__snake_case = R'''\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
"""simple docstring"""
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float''' ),
'''references''': datasets.Value('''float''' ),
} ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'''] , )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=False ):
'''simple docstring'''
UpperCamelCase__ :List[str] = spearmanr(UpperCamelCase_ , UpperCamelCase_ )
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]} | 219 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
__snake_case = {'''tokenization_wav2vec2_phoneme''': ['''Wav2Vec2PhonemeCTCTokenizer''']}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
__snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 219 | 1 |
"""simple docstring"""
def lowercase_ ( _lowerCamelCase: str , _lowerCamelCase: str ) -> list:
'''simple docstring'''
__lowerCamelCase : Tuple = len(_UpperCamelCase )
__lowerCamelCase : Optional[Any] = []
for i in range(len(_UpperCamelCase ) - pat_len + 1 ):
__lowerCamelCase : List[str] = True
for j in range(_UpperCamelCase ):
if s[i + j] != pattern[j]:
__lowerCamelCase : str = False
break
if match_found:
position.append(_UpperCamelCase )
return position
if __name__ == "__main__":
assert naive_pattern_search('''ABCDEFG''', '''DE''') == [3]
print(naive_pattern_search('''ABAAABCDBBABCDDEBCABC''', '''ABC''')) | 135 |
"""simple docstring"""
import math
def lowerCamelCase ( _UpperCamelCase : int ) -> list[int]:
'''simple docstring'''
__UpperCAmelCase : List[Any] = []
__UpperCAmelCase : Dict = 2
__UpperCAmelCase : Union[str, Any] = int(math.sqrt(_UpperCamelCase ) ) # Size of every segment
__UpperCAmelCase : Tuple = [True] * (end + 1)
__UpperCAmelCase : int = []
while start <= end:
if temp[start] is True:
in_prime.append(_UpperCamelCase )
for i in range(start * start , end + 1 , _UpperCamelCase ):
__UpperCAmelCase : Dict = False
start += 1
prime += in_prime
__UpperCAmelCase : Optional[int] = end + 1
__UpperCAmelCase : Dict = min(2 * end , _UpperCamelCase )
while low <= n:
__UpperCAmelCase : Union[str, Any] = [True] * (high - low + 1)
for each in in_prime:
__UpperCAmelCase : Dict = math.floor(low / each ) * each
if t < low:
t += each
for j in range(_UpperCamelCase , high + 1 , _UpperCamelCase ):
__UpperCAmelCase : Tuple = False
for j in range(len(_UpperCamelCase ) ):
if temp[j] is True:
prime.append(j + low )
__UpperCAmelCase : Tuple = high + 1
__UpperCAmelCase : Optional[int] = min(high + end , _UpperCamelCase )
return prime
print(sieve(10**6))
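For contrast, a plain Sieve of Eratosthenes keeps one flag per candidate, so its memory grows linearly with n; the segmented version above only ever holds a window of about sqrt(n) flags, which is the point of segmenting:
def simple_sieve(n: int) -> list[int]:
    # O(n log log n) time but O(n) memory: one boolean per candidate (n >= 2).
    is_prime = [True] * (n + 1)
    is_prime[0] = is_prime[1] = False
    for p in range(2, int(n**0.5) + 1):
        if is_prime[p]:
            for multiple in range(p * p, n + 1, p):
                is_prime[multiple] = False
    return [i for i, flag in enumerate(is_prime) if flag]
assert simple_sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]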
| 115 | 0 |
"""simple docstring"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> float:
'''simple docstring'''
lowercase_ = np.array([[1, item, train_mtch[i]] for i, item in enumerate(__lowerCAmelCase )] )
lowercase_ = np.array(__lowerCAmelCase )
lowercase_ = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose() , __lowerCAmelCase ) ) , x.transpose() ) , __lowerCAmelCase )
return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2] )
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> float:
'''simple docstring'''
lowercase_ = (1, 2, 1)
lowercase_ = (1, 1, 0, 7)
lowercase_ = SARIMAX(
__lowerCAmelCase , exog=__lowerCAmelCase , order=__lowerCAmelCase , seasonal_order=__lowerCAmelCase )
lowercase_ = model.fit(disp=__lowerCAmelCase , maxiter=6_00 , method="""nm""" )
lowercase_ = model_fit.predict(1 , len(__lowerCAmelCase ) , exog=[test_match] )
return result[0]
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> float:
'''simple docstring'''
lowercase_ = SVR(kernel="""rbf""" , C=1 , gamma=0.1 , epsilon=0.1 )
regressor.fit(__lowerCAmelCase , __lowerCAmelCase )
lowercase_ = regressor.predict(__lowerCAmelCase )
return y_pred[0]
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> float:
'''simple docstring'''
train_user.sort()
lowercase_ = np.percentile(__lowerCAmelCase , 25 )
lowercase_ = np.percentile(__lowerCAmelCase , 75 )
lowercase_ = qa - qa
lowercase_ = qa - (iqr * 0.1)
return low_lim
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> bool:
'''simple docstring'''
lowercase_ = 0
lowercase_ = 0
for i in list_vote:
if i > actual_result:
lowercase_ = not_safe + 1
else:
if abs(abs(__lowerCAmelCase ) - abs(__lowerCAmelCase ) ) <= 0.1:
safe += 1
else:
not_safe += 1
return safe > not_safe
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
UpperCAmelCase : List[str] = [[1_8231, 0.0, 1], [2_2621, 1.0, 2], [1_5675, 0.0, 3], [2_3583, 1.0, 4]]
UpperCAmelCase : Union[str, Any] = pd.DataFrame(
data_input, columns=["total_user", "total_even", "days"]
)
UpperCAmelCase : Tuple = Normalizer().fit_transform(data_input_df.values)
# split data
UpperCAmelCase : Dict = normalize_df[:, 2].tolist()
UpperCAmelCase : int = normalize_df[:, 0].tolist()
UpperCAmelCase : str = normalize_df[:, 1].tolist()
# for svr (input variable = total date and total match)
UpperCAmelCase : str = normalize_df[:, [1, 2]].tolist()
UpperCAmelCase : Optional[int] = x[: len(x) - 1]
UpperCAmelCase : List[str] = x[len(x) - 1 :]
# for linear regression & sarimax
UpperCAmelCase : str = total_date[: len(total_date) - 1]
UpperCAmelCase : Optional[Any] = total_user[: len(total_user) - 1]
UpperCAmelCase : str = total_match[: len(total_match) - 1]
UpperCAmelCase : Optional[int] = total_date[len(total_date) - 1 :]
UpperCAmelCase : Tuple = total_user[len(total_user) - 1 :]
UpperCAmelCase : Union[str, Any] = total_match[len(total_match) - 1 :]
# voting system with forecasting
UpperCAmelCase : List[str] = [
linear_regression_prediction(
trn_date, trn_user, trn_match, tst_date, tst_match
),
sarimax_predictor(trn_user, trn_match, tst_match),
support_vector_regressor(x_train, x_test, trn_user),
]
# check the safety of today's data
UpperCAmelCase : Optional[Any] = "" if data_safety_checker(res_vote, tst_user) else "not "
print("Today's data is {not_str}safe.")
| 313 |
"""simple docstring"""
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> list:
'''simple docstring'''
if any(not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or x < 0 for x in sequence ):
raise TypeError("""Sequence must be list of non-negative integers""" )
for _ in range(len(__lowerCAmelCase ) ):
for i, (rod_upper, rod_lower) in enumerate(zip(__lowerCAmelCase , sequence[1:] ) ):
if rod_upper > rod_lower:
sequence[i] -= rod_upper - rod_lower
sequence[i + 1] += rod_upper - rod_lower
return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
| 313 | 1 |
'''simple docstring'''
from __future__ import annotations
from math import pi
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) -> dict[str, float]:
'''simple docstring'''
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if inductance < 0:
raise ValueError('''Inductance cannot be negative''' )
if frequency < 0:
raise ValueError('''Frequency cannot be negative''' )
if reactance < 0:
raise ValueError('''Inductive reactance cannot be negative''' )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
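A quick worked check of the reactance branch, written out independently of the obfuscated names above: the formula is X_L = 2*pi*f*L, so with L = 35 mH and f = 1000 Hz the reactance comes to about 219.91 ohm.
from math import pi
inductance = 35e-3  # henries
frequency = 1000.0  # hertz
reactance = 2 * pi * frequency * inductance
print(f"{reactance:.2f} ohm")  # 219.91 ohm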
| 56 |
import random
def A ( a_ ,a_ ,a_ = False ) -> dict:
__UpperCamelCase : dict ={i: [] for i in range(a_ )}
# if probability is greater or equal than 1, then generate a complete graph
if probability >= 1:
return complete_graph(a_ )
# if probability is lower or equal than 0, then return a graph without edges
if probability <= 0:
return graph
# for each pair of nodes (i, j), add an edge from i to j
# if the randomly generated number is lower than the edge probability
for i in range(a_ ):
for j in range(i + 1 ,a_ ):
if random.random() < probability:
graph[i].append(a_ )
if not directed:
# if the graph is undirected, also add the reverse edge from j to i
graph[j].append(a_ )
return graph
def A ( a_ ) -> dict:
return {
i: [j for j in range(a_ ) if i != j] for i in range(a_ )
}
if __name__ == "__main__":
import doctest
doctest.testmod()
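A readable, self-contained restatement of the Erdős–Rényi-style generator above (omitting the p >= 1 and p <= 0 shortcuts); seeding `random` makes the output reproducible:
import random
def random_graph(nodes: int, probability: float, directed: bool = False) -> dict:
    graph: dict = {i: [] for i in range(nodes)}
    for i in range(nodes):
        for j in range(i + 1, nodes):
            # Add edge (i, j) when the random draw falls below the probability.
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    graph[j].append(i)  # undirected: mirror the edge
    return graph
random.seed(0)
print(random_graph(4, 0.5))  # deterministic adjacency lists for the fixed seed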
| 71 | 0 |
import os
def __magic_name__ ( __lowerCAmelCase : str = "matrix.txt" ) -> int:
with open(os.path.join(os.path.dirname(__lowerCAmelCase ) , __lowerCAmelCase ) ) as in_file:
__lowerCamelCase = in_file.read()
__lowerCamelCase = [[int(__lowerCAmelCase ) for cell in row.split(''',''' )] for row in data.strip().splitlines()]
__lowerCamelCase = [[0 for cell in row] for row in grid]
__lowerCamelCase = len(grid[0] )
__lowerCamelCase = [[0 for i in range(__lowerCAmelCase )] for j in range(__lowerCAmelCase )]
__lowerCamelCase = grid[0][0]
for i in range(1 , __lowerCAmelCase ):
__lowerCamelCase = grid[0][i] + dp[0][i - 1]
for i in range(1 , __lowerCAmelCase ):
__lowerCamelCase = grid[i][0] + dp[i - 1][0]
for i in range(1 , __lowerCAmelCase ):
for j in range(1 , __lowerCAmelCase ):
__lowerCamelCase = grid[i][j] + min(dp[i - 1][j] , dp[i][j - 1] )
return dp[-1][-1]
if __name__ == "__main__":
print(F'{solution() = }')
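A readable version of the dynamic programme above, checked on a small grid. The recurrence is dp[i][j] = grid[i][j] + min(dp[i-1][j], dp[i][j-1]), with the first row and column seeded separately because they have only one predecessor:
def min_path_sum(grid: list[list[int]]) -> int:
    rows, cols = len(grid), len(grid[0])
    dp = [[0] * cols for _ in range(rows)]
    dp[0][0] = grid[0][0]
    for j in range(1, cols):  # first row: reachable only from the left
        dp[0][j] = grid[0][j] + dp[0][j - 1]
    for i in range(1, rows):  # first column: reachable only from above
        dp[i][0] = grid[i][0] + dp[i - 1][0]
    for i in range(1, rows):
        for j in range(1, cols):
            dp[i][j] = grid[i][j] + min(dp[i - 1][j], dp[i][j - 1])
    return dp[-1][-1]
assert min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7  # path 1-3-1-1-1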
| 353 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ : List[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : str = {
"facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
"facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
"facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
"facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
"facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
"facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
"facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
"facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
"facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}
class lowerCAmelCase__ ( __lowercase ):
a__ : Dict = """xmod"""
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=3_05_22 , SCREAMING_SNAKE_CASE__ : str=7_68 , SCREAMING_SNAKE_CASE__ : int=12 , SCREAMING_SNAKE_CASE__ : Dict=12 , SCREAMING_SNAKE_CASE__ : List[str]=30_72 , SCREAMING_SNAKE_CASE__ : List[Any]="gelu" , SCREAMING_SNAKE_CASE__ : Dict=0.1 , SCREAMING_SNAKE_CASE__ : int=0.1 , SCREAMING_SNAKE_CASE__ : List[str]=5_12 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=2 , SCREAMING_SNAKE_CASE__ : List[Any]=0.02 , SCREAMING_SNAKE_CASE__ : Optional[Any]=1e-12 , SCREAMING_SNAKE_CASE__ : List[str]=1 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0 , SCREAMING_SNAKE_CASE__ : int=2 , SCREAMING_SNAKE_CASE__ : Any="absolute" , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : Tuple=2 , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : Optional[int]=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : Dict=("en_XX",) , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , **SCREAMING_SNAKE_CASE__ : int , ) -> str:
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = hidden_act
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = type_vocab_size
__lowerCamelCase = initializer_range
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = position_embedding_type
__lowerCamelCase = use_cache
__lowerCamelCase = classifier_dropout
__lowerCamelCase = pre_norm
__lowerCamelCase = adapter_reduction_factor
__lowerCamelCase = adapter_layer_norm
__lowerCamelCase = adapter_reuse_layer_norm
__lowerCamelCase = ln_before_adapter
__lowerCamelCase = list(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = default_language
class lowerCAmelCase__ ( __lowercase ):
@property
def __A ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
__lowerCamelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
__lowerCamelCase = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 339 | 0 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
a__ : Dict = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''')
@dataclass
class UpperCamelCase_ :
"""simple docstring"""
snake_case__ : Optional[str] = field(
default="cifar10" , metadata={"help": "Name of a dataset from the datasets package"})
snake_case__ : Optional[str] = field(
default=UpperCamelCase , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."})
snake_case__ : Optional[str] = field(
default=UpperCamelCase , metadata={"help": "The column name of the images in the files."})
snake_case__ : Optional[str] = field(default=UpperCamelCase , metadata={"help": "A folder containing the training data."})
snake_case__ : Optional[str] = field(default=UpperCamelCase , metadata={"help": "A folder containing the validation data."})
snake_case__ : Optional[float] = field(
default=0.15 , metadata={"help": "Percent to split off of train for validation."})
snake_case__ : Optional[int] = field(
default=UpperCamelCase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
snake_case__ : Optional[int] = field(
default=UpperCamelCase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
def UpperCAmelCase_ ( self : Tuple ) -> List[Any]:
__SCREAMING_SNAKE_CASE = {}
if self.train_dir is not None:
__SCREAMING_SNAKE_CASE = self.train_dir
if self.validation_dir is not None:
__SCREAMING_SNAKE_CASE = self.validation_dir
__SCREAMING_SNAKE_CASE = data_files if data_files else None
@dataclass
class UpperCamelCase_ :
"""simple docstring"""
snake_case__ : str = field(
default=UpperCamelCase , metadata={
"help": (
"The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."
)
} , )
snake_case__ : Optional[str] = field(
default=UpperCamelCase , metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"})
snake_case__ : Optional[str] = field(
default=UpperCamelCase , metadata={
"help": (
"Override some existing default config settings when a model is trained from scratch. Example: "
"n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
)
} , )
snake_case__ : Optional[str] = field(
default=UpperCamelCase , metadata={"help": "Where do you want to store the pretrained models downloaded from s3"})
snake_case__ : str = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
snake_case__ : str = field(default=UpperCamelCase , metadata={"help": "Name or path of preprocessor config."})
snake_case__ : bool = field(
default=UpperCamelCase , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
snake_case__ : float = field(
default=0.75 , metadata={"help": "The ratio of the number of masked tokens in the input sequence."})
snake_case__ : bool = field(
default=UpperCamelCase , metadata={"help": "Whether or not to train with normalized pixel values as target."})
@dataclass
class UpperCamelCase_ ( UpperCamelCase):
"""simple docstring"""
snake_case__ : float = field(
default=1E-3 , metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."})
def UpperCAmelCase__ (lowerCAmelCase_ ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = torch.stack([example["pixel_values"] for example in examples] )
return {"pixel_values": pixel_values}
def UpperCAmelCase__ ():
'''simple docstring'''
__SCREAMING_SNAKE_CASE = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_mae" , lowerCAmelCase_ , lowerCAmelCase_ )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE = training_args.get_process_log_level()
logger.setLevel(lowerCAmelCase_ )
transformers.utils.logging.set_verbosity(lowerCAmelCase_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
__SCREAMING_SNAKE_CASE = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__SCREAMING_SNAKE_CASE = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Initialize our dataset.
__SCREAMING_SNAKE_CASE = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
__SCREAMING_SNAKE_CASE = None if "validation" in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , lowerCAmelCase_ ) and data_args.train_val_split > 0.0:
__SCREAMING_SNAKE_CASE = ds["train"].train_test_split(data_args.train_val_split )
__SCREAMING_SNAKE_CASE = split["train"]
__SCREAMING_SNAKE_CASE = split["test"]
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__SCREAMING_SNAKE_CASE = {
"cache_dir": model_args.cache_dir,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.config_name:
__SCREAMING_SNAKE_CASE = ViTMAEConfig.from_pretrained(model_args.config_name , **lowerCAmelCase_ )
elif model_args.model_name_or_path:
__SCREAMING_SNAKE_CASE = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **lowerCAmelCase_ )
else:
__SCREAMING_SNAKE_CASE = ViTMAEConfig()
logger.warning("You are instantiating a new config instance from scratch." )
if model_args.config_overrides is not None:
logger.info(f"""Overriding config: {model_args.config_overrides}""" )
config.update_from_string(model_args.config_overrides )
logger.info(f"""New config: {config}""" )
# adapt config
config.update(
{
"mask_ratio": model_args.mask_ratio,
"norm_pix_loss": model_args.norm_pix_loss,
} )
# create image processor
if model_args.image_processor_name:
__SCREAMING_SNAKE_CASE = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **lowerCAmelCase_ )
elif model_args.model_name_or_path:
__SCREAMING_SNAKE_CASE = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **lowerCAmelCase_ )
else:
__SCREAMING_SNAKE_CASE = ViTImageProcessor()
# create model
if model_args.model_name_or_path:
__SCREAMING_SNAKE_CASE = ViTMAEForPreTraining.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=lowerCAmelCase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info("Training new model from scratch" )
__SCREAMING_SNAKE_CASE = ViTMAEForPreTraining(lowerCAmelCase_ )
if training_args.do_train:
__SCREAMING_SNAKE_CASE = ds["train"].column_names
else:
__SCREAMING_SNAKE_CASE = ds["validation"].column_names
if data_args.image_column_name is not None:
__SCREAMING_SNAKE_CASE = data_args.image_column_name
elif "image" in column_names:
__SCREAMING_SNAKE_CASE = "image"
elif "img" in column_names:
__SCREAMING_SNAKE_CASE = "img"
else:
__SCREAMING_SNAKE_CASE = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
__SCREAMING_SNAKE_CASE = image_processor.size["shortest_edge"]
else:
__SCREAMING_SNAKE_CASE = (image_processor.size["height"], image_processor.size["width"])
__SCREAMING_SNAKE_CASE = Compose(
[
Lambda(lambda lowerCAmelCase_ : lowerCAmelCase_.convert("RGB" ) if lowerCAmelCase_.mode != "RGB" else lowerCAmelCase_ ),
RandomResizedCrop(lowerCAmelCase_ , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
def preprocess_images(lowerCAmelCase_ ):
__SCREAMING_SNAKE_CASE = [transforms(lowerCAmelCase_ ) for image in examples[image_column_name]]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError("--do_train requires a train dataset" )
if data_args.max_train_samples is not None:
__SCREAMING_SNAKE_CASE = ds["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(lowerCAmelCase_ )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError("--do_eval requires a validation dataset" )
if data_args.max_eval_samples is not None:
__SCREAMING_SNAKE_CASE = (
ds["validation"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(lowerCAmelCase_ )
# Compute absolute learning rate
__SCREAMING_SNAKE_CASE = (
training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
)
if training_args.base_learning_rate is not None:
__SCREAMING_SNAKE_CASE = training_args.base_learning_rate * total_train_batch_size / 256
# Initialize our trainer
__SCREAMING_SNAKE_CASE = Trainer(
model=lowerCAmelCase_ , args=lowerCAmelCase_ , train_dataset=ds["train"] if training_args.do_train else None , eval_dataset=ds["validation"] if training_args.do_eval else None , tokenizer=lowerCAmelCase_ , data_collator=lowerCAmelCase_ , )
# Training
if training_args.do_train:
__SCREAMING_SNAKE_CASE = None
if training_args.resume_from_checkpoint is not None:
__SCREAMING_SNAKE_CASE = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__SCREAMING_SNAKE_CASE = last_checkpoint
__SCREAMING_SNAKE_CASE = trainer.train(resume_from_checkpoint=lowerCAmelCase_ )
trainer.save_model()
trainer.log_metrics("train" , train_result.metrics )
trainer.save_metrics("train" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
__SCREAMING_SNAKE_CASE = trainer.evaluate()
trainer.log_metrics("eval" , lowerCAmelCase_ )
trainer.save_metrics("eval" , lowerCAmelCase_ )
# Write model card and (optionally) push to hub
__SCREAMING_SNAKE_CASE = {
"tasks": "masked-auto-encoding",
"dataset": data_args.dataset_name,
"tags": ["masked-auto-encoding"],
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowerCAmelCase_ )
else:
trainer.create_model_card(**lowerCAmelCase_ )
def UpperCAmelCase__ (lowerCAmelCase_ ):
'''simple docstring'''
main()
if __name__ == "__main__":
main()
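The base-learning-rate scaling in this script follows the linear scaling rule used by MAE: absolute_lr = base_lr * total_batch_size / 256. A worked example with illustrative values (the 1e-3 default comes from the `base_learning_rate` field above):
base_learning_rate = 1e-3  # the CustomTrainingArguments default above
per_device_batch, grad_accumulation, world_size = 32, 2, 4
total_train_batch_size = per_device_batch * grad_accumulation * world_size  # 256
learning_rate = base_learning_rate * total_train_batch_size / 256
print(learning_rate)  # 0.001 -- unchanged at the reference batch size of 256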
| 54 |
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
return int((input_a, input_a).count(1 ) != 0 )
def snake_case__ ( ):
"""simple docstring"""
assert or_gate(0, 0 ) == 0
assert or_gate(0, 1 ) == 1
assert or_gate(1, 0 ) == 1
assert or_gate(1, 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 334 | 0 |
import baseaa
def lowerCamelCase__ (_UpperCAmelCase):
return baseaa.aaaencode(string.encode('utf-8'))
def lowerCamelCase__ (_UpperCAmelCase):
return baseaa.aaadecode(_UpperCAmelCase).decode('utf-8')
if __name__ == "__main__":
import doctest
doctest.testmod()
| 356 |
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def lowerCamelCase__ (_UpperCAmelCase):
monkeypatch.setattr('datasets.utils.deprecation_utils._emitted_deprecation_warnings' , set())
@pytest.fixture
def lowerCamelCase__ (_UpperCAmelCase):
class _snake_case :
def __init__( self , a) -> List[Any]:
SCREAMING_SNAKE_CASE = metric_id
class _snake_case :
_lowercase : Optional[Any] = [MetricMock(A__ ) for metric_id in ['''accuracy''', '''mse''', '''precision''', '''codeparrot/apps_metric''']]
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
return self._metrics
monkeypatch.setattr('datasets.inspect.huggingface_hub' , HfhMock())
@pytest.mark.parametrize(
'func, args' , [(load_metric, ('metrics/mse',)), (list_metrics, ()), (inspect_metric, ('metrics/mse', 'tmp_path'))])
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
if "tmp_path" in args:
SCREAMING_SNAKE_CASE = tuple(arg if arg != 'tmp_path' else tmp_path for arg in args)
with pytest.warns(_UpperCAmelCase , match='https://huggingface.co/docs/evaluate'):
func(*_UpperCAmelCase)
| 327 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_A : Dict ={
'''configuration_clip''': [
'''CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPConfig''',
'''CLIPOnnxConfig''',
'''CLIPTextConfig''',
'''CLIPVisionConfig''',
],
'''processing_clip''': ['''CLIPProcessor'''],
'''tokenization_clip''': ['''CLIPTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Dict =['''CLIPTokenizerFast''']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Optional[int] =['''CLIPFeatureExtractor''']
_A : List[Any] =['''CLIPImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : List[str] =[
'''CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPModel''',
'''CLIPPreTrainedModel''',
'''CLIPTextModel''',
'''CLIPTextModelWithProjection''',
'''CLIPVisionModel''',
'''CLIPVisionModelWithProjection''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Union[str, Any] =[
'''TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFCLIPModel''',
'''TFCLIPPreTrainedModel''',
'''TFCLIPTextModel''',
'''TFCLIPVisionModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Dict =[
'''FlaxCLIPModel''',
'''FlaxCLIPPreTrainedModel''',
'''FlaxCLIPTextModel''',
'''FlaxCLIPTextPreTrainedModel''',
'''FlaxCLIPVisionModel''',
'''FlaxCLIPVisionPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
_A : Optional[Any] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 41 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
a__ : int = logging.get_logger(__name__)
a__ : List[str] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
a__ : List[Any] = {
'vocab_file': {
'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt',
'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt',
'junnyu/roformer_chinese_char_small': (
'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'
),
'junnyu/roformer_chinese_char_base': (
'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'
),
'junnyu/roformer_small_discriminator': (
'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'
),
'junnyu/roformer_small_generator': (
'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'
),
}
}
a__ : Optional[Any] = {
'junnyu/roformer_chinese_small': 1_5_3_6,
'junnyu/roformer_chinese_base': 1_5_3_6,
'junnyu/roformer_chinese_char_small': 5_1_2,
'junnyu/roformer_chinese_char_base': 5_1_2,
'junnyu/roformer_small_discriminator': 1_2_8,
'junnyu/roformer_small_generator': 1_2_8,
}
a__ : str = {
'junnyu/roformer_chinese_small': {'do_lower_case': True},
'junnyu/roformer_chinese_base': {'do_lower_case': True},
'junnyu/roformer_chinese_char_small': {'do_lower_case': True},
'junnyu/roformer_chinese_char_base': {'do_lower_case': True},
'junnyu/roformer_small_discriminator': {'do_lower_case': True},
'junnyu/roformer_small_generator': {'do_lower_case': True},
}
class lowercase_ ( a__ ):
__UpperCAmelCase = VOCAB_FILES_NAMES
__UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase = PRETRAINED_INIT_CONFIGURATION
__UpperCAmelCase = RoFormerTokenizer
def __init__( self , a=None , a=None , a=True , a="[UNK]" , a="[SEP]" , a="[PAD]" , a="[CLS]" , a="[MASK]" , a=True , a=None , **a , ):
super().__init__(
a , tokenizer_file=a , do_lower_case=a , unk_token=a , sep_token=a , pad_token=a , cls_token=a , mask_token=a , tokenize_chinese_chars=a , strip_accents=a , **a , )
UpperCamelCase__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
pre_tok_state.get("lowercase" , a ) != do_lower_case
or pre_tok_state.get("strip_accents" , a ) != strip_accents
):
UpperCamelCase__ = getattr(a , pre_tok_state.pop("type" ) )
UpperCamelCase__ = do_lower_case
UpperCamelCase__ = strip_accents
UpperCamelCase__ = pre_tok_class(**a )
UpperCamelCase__ = do_lower_case
def __getstate__( self ):
UpperCamelCase__ = self.__dict__.copy()
UpperCamelCase__ = BertPreTokenizer()
return state
def __setstate__( self , a ):
UpperCamelCase__ = d
UpperCamelCase__ = self.__dict__["_tokenizer"].get_vocab()
UpperCamelCase__ = PreTokenizer.custom(JiebaPreTokenizer(a ) )
def __a ( self , a , a=None ):
UpperCamelCase__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __a ( self , a , a = None ):
UpperCamelCase__ = [self.sep_token_id]
UpperCamelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __a ( self , a , a = None ):
UpperCamelCase__ = self._tokenizer.model.save(a , name=a )
return tuple(a )
def __a ( self , a , a=None , a=None , a=False , **a , ):
UpperCamelCase__ = BertPreTokenizer()
return super().save_pretrained(a , a , a , a , **a )
| 80 | 0 |
"""simple docstring"""
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
_A = '3'
print('Python version:', sys.version)
print('OS platform:', platform.platform())
print('OS architecture:', platform.machine())
try:
import torch
print('Torch version:', torch.__version__)
print('Cuda available:', torch.cuda.is_available())
print('Cuda version:', torch.version.cuda)
print('CuDNN version:', torch.backends.cudnn.version())
print('Number of GPUs available:', torch.cuda.device_count())
except ImportError:
print('Torch version:', None)
try:
import transformers
print('transformers version:', transformers.__version__)
except ImportError:
print('transformers version:', None)
| 205 |
"""simple docstring"""
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
_A = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
_A = [file for file in filepaths if file != file.lower()]
if upper_files:
print(F"""{len(upper_files)} files contain uppercase characters:""")
print('\n'.join(upper_files) + '\n')
_A = [file for file in filepaths if ' ' in file]
if space_files:
print(F"""{len(space_files)} files contain space characters:""")
print('\n'.join(space_files) + '\n')
_A = [file for file in filepaths if '-' in file]
if hyphen_files:
print(F"""{len(hyphen_files)} files contain hyphen characters:""")
print('\n'.join(hyphen_files) + '\n')
_A = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(F"""{len(nodir_files)} files are not in a directory:""")
print('\n'.join(nodir_files) + '\n')
_A = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 205 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase__ = {
"""configuration_deberta""": ["""DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DebertaConfig""", """DebertaOnnxConfig"""],
"""tokenization_deberta""": ["""DebertaTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""DebertaTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DebertaForMaskedLM""",
"""DebertaForQuestionAnswering""",
"""DebertaForSequenceClassification""",
"""DebertaForTokenClassification""",
"""DebertaModel""",
"""DebertaPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDebertaForMaskedLM""",
"""TFDebertaForQuestionAnswering""",
"""TFDebertaForSequenceClassification""",
"""TFDebertaForTokenClassification""",
"""TFDebertaModel""",
"""TFDebertaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 86 |
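# The `_LazyModule` registered above defers heavy submodule imports until first attribute
# access. A minimal, self-contained sketch of the same idea (names here are illustrative,
# not transformers' actual API; only importlib/types are used):
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each public attribute to the submodule that defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        module_name = self._attr_to_module.get(attr)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(module_name)  # the real import happens only now
        return getattr(module, attr)


# e.g. LazyModule("fake_pkg", {"json": ["dumps"]}).dumps({"a": 1}) == '{"a": 1}'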
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""sail/poolformer_s12""": """https://huggingface.co/sail/poolformer_s12/resolve/main/config.json""",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class PoolFormerConfig(PretrainedConfig):
    model_type = "poolformer"

    def __init__(
        self,
        num_channels=3,
        patch_size=16,
        stride=16,
        pool_size=3,
        mlp_ratio=4.0,
        depths=[2, 2, 6, 2],
        hidden_sizes=[64, 128, 320, 512],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        padding=[2, 1, 1, 1],
        num_encoder_blocks=4,
        drop_path_rate=0.0,
        hidden_act="gelu",
        use_layer_scale=True,
        layer_scale_init_value=1e-5,
        initializer_range=0.02,
        **kwargs,
    ):
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.stride = stride
        self.padding = padding
        self.pool_size = pool_size
        self.hidden_sizes = hidden_sizes
        self.mlp_ratio = mlp_ratio
        self.depths = depths
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.num_encoder_blocks = num_encoder_blocks
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.initializer_range = initializer_range
        super().__init__(**kwargs)


class PoolFormerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 2e-3 | 86 | 1 |
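# A quick sanity check of the config defaults restored above (pure attribute access,
# no model weights involved):
cfg = PoolFormerConfig()
assert cfg.hidden_sizes == [64, 128, 320, 512]
assert cfg.num_encoder_blocks == len(cfg.depths) == 4  # one hidden size / depth per encoder stage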
"""simple docstring"""
def binary_or(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
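# Hand-checked examples for the function above (the stripped docstring once carried the
# doctests that `doctest.testmod()` below would run; these asserts play the same role):
assert binary_or(25, 32) == "0b111001"    # 0b011001 | 0b100000
assert binary_or(0, 0) == "0b0"
assert binary_or(25, 32) == bin(25 | 32)  # agrees with Python's native operator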
if __name__ == "__main__":
import doctest
doctest.testmod() | 363 |
"""simple docstring"""
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack(list):
    '''simple docstring'''

    def __lt__(self, other) -> bool:
        """simple docstring"""
        return self[-1] < other[-1]

    def __eq__(self, other) -> bool:
        """simple docstring"""
        return self[-1] == other[-1]


def patience_sort(collection: list) -> list:
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stack = Stack([element])
        i = bisect_left(stacks, new_stack)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stack)
    # use a heap-based merge to merge the stacks efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection


if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
print(patience_sort(unsorted)) | 58 | 0 |
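# How the sort above behaves on a small input: elements are dealt onto piles whose tops
# stay ordered (found via bisect), then the piles are heap-merged back together.
sample = [5, 1, 4, 2, 3]
assert patience_sort(sample) == [1, 2, 3, 4, 5]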
'''simple docstring'''
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainer's args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others, and a second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is (it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a training run:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each experiment multiple times, e.g., 3 times via --repeat-times 3, and it will report the averaged results.
#
# By default it'll use the lowest result as the baseline (100%) and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
nan = float("nan")


class Tee:
    """
    A helper class to tee print's output into a file.
    Usage:
    sys.stdout = Tee(filename)
    """

    def __init__(self, filename):
        self.stdout = sys.stdout
        self.file = open(filename, "a")

    def __getattr__(self, attr):
        return getattr(self.stdout, attr)

    def write(self, msg):
        self.stdout.write(msg)
        # strip tqdm codes
        self.file.write(re.sub(r"^.*\r", "", msg, 0, re.M))
def get_original_command(max_width=80, full_python_path=False):
    '''simple docstring'''
    cmd = []

    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f"{key}={val}")

    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/")[-1]
    cmd.append(python)

    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))

    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd) > 0:
        current_line += f"{cmd.pop(0)} "
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ""
    return "\\\n".join(lines)
def get_base_command(args, output_dir):
    '''simple docstring'''
    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)
    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+", "", args.base_cmd)
    args.base_cmd += f" --output_dir {output_dir}"
    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+", "", args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"
    return [sys.executable] + shlex.split(args.base_cmd)
def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
    '''simple docstring'''
    # enable to debug everything but the run itself, to do it fast and see the progress
    if 0:
        import random
        from time import sleep

        sleep(0)
        return dict(
            {k: random.uniform(0, 100) for k in metric_keys},
            **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222])},
        )

    result = subprocess.run(cmd, capture_output=True, text=True)

    if verbose:
        print("STDOUT", result.stdout)
        print("STDERR", result.stderr)

    # save the streams
    prefix = variation.replace(" ", "-")
    with open(Path(output_dir) / f"log.{prefix}.stdout.txt", "w") as f:
        f.write(result.stdout)
    with open(Path(output_dir) / f"log.{prefix}.stderr.txt", "w") as f:
        f.write(result.stderr)

    if result.returncode != 0:
        if verbose:
            print("failed")
        return {target_metric_key: nan}

    with io.open(f"{output_dir}/all_results.json", "r", encoding="utf-8") as f:
        metrics = json.load(f)

    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def process_run(id, cmd, variation_key, variation, longest_variation_len, target_metric_key, report_metric_keys, repeat_times, output_dir, verbose):
    '''simple docstring'''
    results = []
    metrics = []
    preamble = f"{id}: {variation:<{longest_variation_len}}"
    outcome = f"{preamble}: "
    metric_keys = set(report_metric_keys + [target_metric_key])
    for i in tqdm(range(repeat_times), desc=preamble, leave=False):
        single_run_metrics = process_run_single(
            id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose)
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result):
            metrics.append(single_run_metrics)
            results.append(result)
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = f"\33[2K\r{outcome}"
    if len(metrics) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2)
        results_str = f"{outcome} {mean_target}"
        if len(results) > 1:
            results_str += f" {tuple(round(x, 2) for x in results)}"
        print(results_str)
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome)
        return {variation_key: variation, target_metric_key: nan}
def get_versions():
    '''simple docstring'''
    properties = torch.cuda.get_device_properties(torch.device("cuda"))
    return f"""
Datetime    : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")}

Software:
transformers: {transformers.__version__}
torch       : {torch.__version__}
cuda        : {torch.version.cuda}
python      : {platform.python_version()}

Hardware:
{torch.cuda.device_count()} GPUs      : {properties.name}, {properties.total_memory / 2**30:0.2f}GB
"""
def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir):
    '''simple docstring'''
    df = pd.DataFrame(results)
    variation_key = "variation"
    diff_key = "diff_%"

    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0,
            axis="columns",
        )

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis="columns")  # reorder cols

    # capitalize
    df = df.rename(str.capitalize, axis="columns")

    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace("_", "<br>"), axis="columns")
    df_console = df.rename(lambda c: c.replace("_", "\n"), axis="columns")

    report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False, floatfmt=".2f")]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False, floatfmt=".2f")]

    print("\n\n".join(report))
def main():
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    parser.add_argument("--base-cmd", default=None, type=str, required=True, help="Base cmd")
    parser.add_argument(
        "--variations", default=None, type=str, nargs="+", required=True,
        help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'")
    parser.add_argument(
        "--base-variation", default=None, type=str,
        help="Baseline variation to compare to. if None the minimal target value will be used to compare against")
    parser.add_argument(
        "--target-metric-key", default=None, type=str, required=True,
        help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second")
    parser.add_argument(
        "--report-metric-keys", default="", type=str,
        help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples")
    parser.add_argument(
        "--repeat-times", default=1, type=int,
        help="How many times to re-run each variation - an average will be reported")
    parser.add_argument(
        "--output_dir", default="output_benchmark", type=str,
        help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked")
    parser.add_argument(
        "--verbose", default=False, action="store_true",
        help="Whether to show the outputs of each run or just the benchmark progress")
    args = parser.parse_args()

    output_dir = args.output_dir
    Path(output_dir).mkdir(exist_ok=True)
    base_cmd = get_base_command(args, output_dir)

    # split each dimension into its --foo variations
    dims = [list(map(str.strip, re.split(r"\|", x))) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
    longest_variation_len = max(len(x) for x in variations)

    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()

    # capture prints into a log file for convenience
    report_fn = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.txt"
    print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt")
    print(f"and this script's output is also piped into {report_fn}")
    sys.stdout = Tee(report_fn)

    print(f"\n*** Running {len(variations)} benchmarks:")
    print(f"Base command: {' '.join(base_cmd)}")

    variation_key = "variation"
    results = []
    for id, variation in enumerate(tqdm(variations, desc="Total completion: ", leave=False)):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1, cmd, variation_key, variation, longest_variation_len,
                args.target_metric_key, report_metric_keys, args.repeat_times, output_dir,
                args.verbose))

    process_results(results, args.target_metric_key, report_metric_keys, args.base_variation, output_dir)


if __name__ == "__main__":
    main()
| 229 | '''simple docstring'''
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
COMMUNITY_PIPELINES_URL = (
    '''https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py'''
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def get_diffusers_versions():
    '''simple docstring'''
    url = "https://pypi.org/pypi/diffusers/json"
    releases = json.loads(request.urlopen(url).read())["releases"].keys()
    return sorted(releases, key=lambda x: version.Version(x))
def init_hf_modules():
    '''simple docstring'''
    # this function has already been executed if HF_MODULES_CACHE is in the Python path
    if HF_MODULES_CACHE in sys.path:
        return

    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / "__init__.py"
    if not init_path.exists():
        init_path.touch()
def create_dynamic_module(name: Union[str, os.PathLike]):
    '''simple docstring'''
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / "__init__.py"
    if not init_path.exists():
        init_path.touch()
def get_relative_imports(module_file):
    '''simple docstring'''
    with open(module_file, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import .xxx`
    relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))
def get_relative_import_files(module_file):
    '''simple docstring'''
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []

    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))

        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f"{f}.py" for f in new_import_files]

        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)

    return all_relative_imports
def check_imports(filename):
    '''simple docstring'''
    with open(filename, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import xxx`
    imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")]

    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)

    if len(missing_packages) > 0:
        raise ImportError(
            "This modeling file requires the following packages that were not found in your environment: "
            f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`")

    return get_relative_imports(filename)
def get_class_in_module(class_name, module_path):
    '''simple docstring'''
    module_path = module_path.replace(os.path.sep, ".")
    module = importlib.import_module(module_path)

    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)
def find_pipeline_class(loaded_module):
    '''simple docstring'''
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))

    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split(".")[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
                    f" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
                    f" {loaded_module}.")
            pipeline_class = cls

    return pipeline_class
def get_cached_module_file(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
):
    '''simple docstring'''
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)

    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)

    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = "local"
    elif pretrained_model_name_or_path.count("/") == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = "v" + ".".join(__version__.split(".")[:3])

        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else "main"
            logger.info(f"Defaulting to latest_version: {revision}.")
        elif revision in available_versions:
            revision = f"v{revision}"
        elif revision == "main":
            revision = revision
        else:
            raise ValueError(
                f"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
                f" {', '.join(available_versions + ['main'])}.")

        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url, cache_dir=cache_dir, force_download=force_download, proxies=proxies,
                resume_download=resume_download, local_files_only=local_files_only, use_auth_token=False)
            submodule = "git"
            module_file = pretrained_model_name_or_path + ".py"
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path, module_file, cache_dir=cache_dir,
                force_download=force_download, proxies=proxies, resume_download=resume_download,
                local_files_only=local_files_only, use_auth_token=use_auth_token)
            submodule = os.path.join("local", "--".join(pretrained_model_name_or_path.split("/")))
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise

    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)

    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = f"{module_needed}.py"
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None

        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha

        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)

        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path, f"{module_needed}.py", cache_dir=cache_dir,
                    force_download=force_download, resume_download=resume_download, proxies=proxies,
                    use_auth_token=use_auth_token, revision=revision, local_files_only=local_files_only)
    return os.path.join(full_submodule, module_file)
def get_class_from_dynamic_module(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    class_name: Optional[str] = None,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    '''simple docstring'''
    final_module = get_cached_module_file(
        pretrained_model_name_or_path, module_file, cache_dir=cache_dir, force_download=force_download,
        resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token,
        revision=revision, local_files_only=local_files_only)
    return get_class_in_module(class_name, final_module.replace(".py", ""))
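# Illustrative (network-touching, so left commented) use of the loader above with a real
# community pipeline name; any other module under examples/community works the same way:
#
#   pipeline_cls = get_class_from_dynamic_module(
#       "clip_guided_stable_diffusion",        # repo/pipeline to fetch from GitHub
#       "clip_guided_stable_diffusion.py",     # module file inside it
#   )
#   # pipeline_cls is the single DiffusionPipeline subclass found in that module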
| 229 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {'configuration_dpt': ['DPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DPTConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_dpt'] = ['DPTFeatureExtractor']
    _import_structure['image_processing_dpt'] = ['DPTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_dpt'] = [
'DPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DPTForDepthEstimation',
'DPTForSemanticSegmentation',
'DPTModel',
'DPTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 236 |
'''simple docstring'''
from torch import nn
def get_activation(act_fn):
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f'Unsupported activation function: {act_fn}')
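# Minimal usage check for the activation factory above:
import torch

act = get_activation("silu")
assert isinstance(act, nn.SiLU)
assert torch.allclose(act(torch.zeros(2)), torch.zeros(2))  # SiLU(0) == 0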
| 236 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class OpenAIGPTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>", "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    def test_full_tokenizer(self):
        tokenizer = OpenAIGPTTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length"
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length"
                )

    # tokenizer has no padding token
    def test_padding_different_model_input_name(self):
        pass


@require_ftfy
@require_spacy
@require_tokenizers
class OpenAIGPTTokenizationTestWithSpacy(OpenAIGPTTokenizationTest):
    """Test OpenAIGPTTokenizer with Spacy and ftfy."""

    pass
| 254 |
'''simple docstring'''
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def t5x_relpos_bias_lookup(params, i, prefix):
    """Returns the Relative Position Bias parameters of a layer. Does not transpose."""
    return params[f'{prefix}/{prefix}/relpos_bias/rel_embedding'][:, i, :]
def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k_tmp = np.ascontiguousarray(params[f'{prefix}/{prefix}/{layer_name}/key/kernel'][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[f'{prefix}/{prefix}/{layer_name}/out/kernel'][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[f'{prefix}/{prefix}/{layer_name}/query/kernel'][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[f'{prefix}/{prefix}/{layer_name}/value/kernel'][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v
def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f'{prefix}/{prefix}/mlp/wi_0/kernel'][:, i, :]
        wi_1 = params[f'{prefix}/{prefix}/mlp/wi_1/kernel'][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f'{prefix}/{prefix}/mlp/wi/kernel'][:, i, :]
    wo = params[f'{prefix}/{prefix}/mlp/wo/kernel'][:, i, :]
    return wi, wo
def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f'{prefix}/{prefix}/{layer_name}/scale'][:, i]
def convert_t5x_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool, scalable_attention: bool = False):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch (state-dict key names restored from the upstream conversion script)."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                old, i, "encoder").T

    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not scalable_attention:
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "encoder").T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "decoder").T

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T
            if scalable_attention:
                # convert the rel_embedding of each layer
                new[f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                    old, i, "decoder").T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new
def make_state_dict(converted_params, is_encoder_only: bool):
    """Prepares a state dict for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict
def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention):
    """Replaces the params in model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention)
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_t5x_checkpoint_to_pytorch(
    t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False, scalable_attention: bool = False):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    config = MT5Config.from_json_file(config_file)
    print(f'Building PyTorch model from configuration: {config}')
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config)
    else:
        model = UMT5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention)

    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}')
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("""Done""")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
)
parser.add_argument(
'''--scalable_attention''',
action='''store_true''',
help='''Whether the model uses scaled attention (umt5 model)''',
default=False,
)
    args = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
    )
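# Example invocation of this script (all paths are placeholders):
#
#   python convert_t5x_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint_1000000 \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/output \
#       --scalable_attention    # pass for umt5-style checkpoints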
| 254 | 1 |
"""simple docstring"""
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformer2DModel
from .resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D
class FlaxCrossAttnDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels, out_channels=self.out_channels,
                dropout_prob=self.dropout, dtype=self.dtype)
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels, n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads, depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype)
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        output_states = ()

        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states
class FlaxDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels, out_channels=self.out_channels,
                dropout_prob=self.dropout, dtype=self.dtype)
            resnets.append(res_block)

        self.resnets = resnets

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, deterministic=True):
        output_states = ()

        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states
class FlaxCrossAttnUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels, out_channels=self.out_channels,
                dropout_prob=self.dropout, dtype=self.dtype)
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels, n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads, depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype)
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True):
        for resnet, attn in zip(self.resnets, self.attentions):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states
class FlaxUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels, out_channels=self.out_channels,
                dropout_prob=self.dropout, dtype=self.dtype)
            resnets.append(res_block)

        self.resnets = resnets

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states
class FlaxUNetMidBlock2DCrossAttn(nn.Module):
    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlock2D(
                in_channels=self.in_channels, out_channels=self.in_channels,
                dropout_prob=self.dropout, dtype=self.dtype)
        ]

        attentions = []

        for _ in range(self.num_layers):
            attn_block = FlaxTransformer2DModel(
                in_channels=self.in_channels, n_heads=self.num_attention_heads,
                d_head=self.in_channels // self.num_attention_heads, depth=1,
                use_linear_projection=self.use_linear_projection,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype)
            attentions.append(attn_block)

            res_block = FlaxResnetBlock2D(
                in_channels=self.in_channels, out_channels=self.in_channels,
                dropout_prob=self.dropout, dtype=self.dtype)
            resnets.append(res_block)

        self.resnets = resnets
        self.attentions = attentions

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        return hidden_states
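# A rough shape-flow sketch for FlaxDownBlock2D above (kept commented: the relative
# imports only resolve inside the diffusers package; all sizes are arbitrary):
#
#   import jax
#   block = FlaxDownBlock2D(in_channels=32, out_channels=64, num_layers=2)
#   x = jnp.zeros((1, 16, 16, 32))   # NHWC, as flax convolutions expect
#   temb = jnp.zeros((1, 128))       # time embedding
#   params = block.init(jax.random.PRNGKey(0), x, temb)
#   y, skips = block.apply(params, x, temb)  # y halves H and W: (1, 8, 8, 64)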
| 351 |
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = FlaxMT5ForConditionalGeneration.from_pretrained('''google/mt5-small''')
        tokenizer = AutoTokenizer.from_pretrained('''google/mt5-small''')

        input_ids = tokenizer('''Hello there''', return_tensors='''np''').input_ids
        labels = tokenizer('''Hi I am''', return_tensors='''np''').input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()

        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
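# What shift_tokens_right contributes above, on a toy example (pad_token_id=0 and
# decoder_start_token_id=0, as in the T5 family):
#
#   labels            = [[ 5, 17, 42,  1]]
#   decoder_input_ids = [[ 0,  5, 17, 42]]   # start token prepended, last label dropped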
| 339 | 0 |
import logging
from transformers.configuration_utils import PretrainedConfig
logger = logging.getLogger(__name__)
class MaskedBertConfig(PretrainedConfig):
    """A BERT-style configuration extended with movement-pruning parameters."""

    model_type = "masked_bert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, pruning_method="topK", mask_init="constant", mask_scale=0.0, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
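
if __name__ == "__main__":
    # Quick sanity check (illustrative): the pruning-specific fields ride
    # alongside the standard BERT hyperparameters.
    config = MaskedBertConfig(pruning_method="topK", mask_init="constant", mask_scale=0.0)
    print(config.pruning_method, config.hidden_size)  # topK 768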
| 340 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/config.json''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/config.json''',
}
class XLNetConfig(PretrainedConfig):
    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(self, vocab_size=32000, d_model=1024, n_layer=24, n_head=16, d_inner=4096, ff_activation="gelu", untie_r=True, attn_type="bi", initializer_range=0.02, layer_norm_eps=1e-12, dropout=0.1, mem_len=512, reuse_len=None, use_mems_eval=True, use_mems_train=False, bi_data=False, clamp_len=-1, same_length=False, summary_type="last", summary_use_proj=True, summary_activation="tanh", summary_last_dropout=0.1, start_n_top=5, end_n_top=5, pad_token_id=5, bos_token_id=1, eos_token_id=2, **kwargs):
        """Constructs XLNetConfig."""
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})"
                )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type

        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length

        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top

        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id

        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.",
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]

        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
| 340 | 1 |
"""simple docstring"""
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
"google/bigbird-roberta-large": (
"https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
),
"google/bigbird-base-trivia-itc": (
"https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/bigbird-roberta-base": 4096,
    "google/bigbird-roberta-large": 4096,
    "google/bigbird-base-trivia-itc": 4096,
}
class BigBirdTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(self, vocab_file, unk_token="<unk>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", sep_token="[SEP]", mask_token="[MASK]", cls_token="[CLS]", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, sep_token=sep_token, mask_token=mask_token, cls_token=cls_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs)

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self) -> int:
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token
    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def _decode(self, token_ids: List[int], skip_special_tokens: bool = False, clean_up_tokenization_spaces: bool = None, spaces_between_special_tokens: bool = True, **kwargs) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # No space before [MASK] and [SEP]
        if spaces_between_special_tokens:
            text = re.sub(r" (\[(MASK|SEP)\])", r"\1", " ".join(sub_texts))
        else:
            text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
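
if __name__ == "__main__":
    # A minimal sketch of the special-token layout produced by the methods
    # above, using hypothetical ids (cls=65, sep=66) instead of a real
    # SentencePiece vocabulary.
    cls, sep = [65], [66]
    seq_a, seq_b = [10, 11, 12], [20, 21]
    single = cls + seq_a + sep
    pair = cls + seq_a + sep + seq_b + sep
    print(single)  # [65, 10, 11, 12, 66]
    print(pair)    # [65, 10, 11, 12, 66, 20, 21, 66]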
| 163 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}
class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(self, vocab_size=246534, n_positions=256, n_embd=1280, dff=8192, n_layer=48, n_head=16, resid_pdrop=0.1, embd_pdrop=0.1, layer_norm_epsilon=1e-6, initializer_range=0.02, use_cache=True, **kwargs):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        super().__init__(**kwargs)
| 163 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/data2vec-text-base': 'https://huggingface.co/data2vec/resolve/main/config.json',
}
class Data2VecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
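
if __name__ == "__main__":
    # Sketch (assumes a transformers version where OnnxConfig is directly
    # constructible): the export advertises batch/sequence as dynamic axes
    # for both model inputs under the default task.
    onnx_config = Data2VecTextOnnxConfig(Data2VecTextConfig())
    print(onnx_config.inputs)
    # OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}), ('attention_mask', {0: 'batch', 1: 'sequence'})])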
| 332 |
"""simple docstring"""
from __future__ import annotations
class BoyerMooreSearch:
    def __init__(self, text: str, pattern: str):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions


text = "ABAABA"
pattern = "AB"
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()

if len(positions) == 0:
    print("No match found")
else:
    print("Pattern found in following positions: ")
    print(positions)
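
# A brute-force cross-check for the heuristic above (illustrative helper, not
# part of the original algorithm).
def _naive_positions(text: str, pattern: str) -> list[int]:
    return [i for i in range(len(text) - len(pattern) + 1) if text[i : i + len(pattern)] == pattern]


# _naive_positions("ABAABA", "AB") == BoyerMooreSearch("ABAABA", "AB").bad_character_heuristic() == [0, 3]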
| 332 | 1 |
__author__ = "Tobias Carryer"
from time import time
class LinearCongruentialGenerator:
    """
    A pseudorandom number generator.
    """

    def __init__(self, multiplier, increment, modulo, seed=int(time())):  # noqa: B008
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = seed

    def next_number(self):
        """The smallest number generated is 0; the largest is modulo - 1."""
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed
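
def lcg_streams_match(n=5):
    """Sanity check (illustrative): two generators built with identical
    parameters and seed produce the same stream of numbers."""
    a = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31, seed=42)
    b = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31, seed=42)
    return all(a.next_number() == b.next_number() for _ in range(n))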
if __name__ == "__main__":
# Show the LCG in action.
    lcg = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31)
while True:
print(lcg.next_number())
| 119 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2ForPreTraining,
    Wav2Vec2Processor,
    logging,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import Wav2Vec2ForSequenceClassification
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
def read_txt_into_dict(filename):
    result = {}
    with open(filename, "r") as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer, weight_type).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split("."):
            shape_pointer = getattr(shape_pointer, attribute)
        hf_shape = shape_pointer.shape

        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split("."):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def rename_dict(key, value, full_name, weight_type, hf_dict):
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        full_key = ".".join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = ".".join([key, hf_param_name])
    else:
        full_key = key

    hf_dict[full_key] = value if "lm_head" in full_key else value[0]


PARAM_MAPPING = {
    "W_a": "linear_1.weight",
    "W_b": "linear_2.weight",
    "b_a": "linear_1.bias",
    "b_b": "linear_2.bias",
    "ln_W": "norm.weight",
    "ln_b": "norm.bias",
}
def load_wav2vec2_layer(name, value, hf_model=None, hf_dict=None):
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split(".")[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            if "weight_g" in name:
                weight_type = "weight_g"
            elif "weight_v" in name:
                weight_type = "weight_v"
            elif "bias" in name:
                weight_type = "bias"
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = "weight"
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            return is_used
    return is_used
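
# Hand-worked example of the wildcard mapping above (illustrative, not executed):
#   fairseq key:  "encoder.layers.3.self_attn.k_proj.weight"
#     matches MAPPING entry "self_attn.k_proj" -> "encoder.layers.*.attention.k_proj"
#     the layer index "3" is recovered from the part of the name before the match,
#     so the target becomes "wav2vec2.encoder.layers.3.attention.k_proj"
#     with weight_type "weight".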
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.wav2vec2.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group"
            )
            is_used = True
        else:
            is_used = load_wav2vec2_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False
):
    if config_path is not None:
        config = Wav2Vec2Config.from_pretrained(config_path)
    else:
        config = Wav2Vec2Config()

    if is_seq_class:
        id2label = read_txt_into_dict(dict_path)
        config.id2label = id2label
        hf_wav2vec = Wav2Vec2ForSequenceClassification(config)
        feature_extractor = Wav2Vec2FeatureExtractor(
            feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=True
        )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="|", do_lower_case=False
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ForPreTraining(config)

    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)

        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
    args = parser.parse_args()

    is_finetuned = not args.not_finetuned and not args.is_seq_class
    convert_wav2vec2_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
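
# Example invocations of this script (paths and checkpoint names are
# illustrative, not shipped artifacts):
#
#   python <this_script>.py \
#       --checkpoint_path ./wav2vec_small.pt \
#       --pytorch_dump_folder_path ./wav2vec2-base \
#       --not_finetuned
#
# Omit --not_finetuned and pass --dict_path for a fine-tuned CTC checkpoint.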
| 119 | 1 |
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json''',
}
class MvpConfig(PretrainedConfig):
    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(self, vocab_size=50267, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, activation_function="gelu", d_model=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, classifier_dropout=0.0, scale_embedding=False, use_cache=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, is_encoder_decoder=True, decoder_start_token_id=2, forced_eos_token_id=2, use_prompt=False, prompt_length=100, prompt_mid_dim=800, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs
        )

        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
| 79 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class CycleDiffusionPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = CycleDiffusionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "negative_prompt",
        "height",
        "width",
        "negative_prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"})
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000, clip_sample=False, set_alpha_to_one=False
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "An astronaut riding an elephant",
            "source_prompt": "An astronaut riding a horse",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "eta": 0.1,
            "strength": 0.8,
            "guidance_scale": 3,
            "source_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs

    def test_cycle(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_cycle_fp16(self):
        components = self.get_dummy_components()
        for name, module in components.items():
            if hasattr(module, "half"):
                components[name] = module.half()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @unittest.skip("non-deterministic pipeline")
    def test_inference_batch_single_identical(self):
        return super().test_inference_batch_single_identical()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class CycleDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_cycle_diffusion_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, safety_checker=None, torch_dtype=torch.float16, revision="fp16"
        )

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"

        generator = torch.manual_seed(0)

        output = pipe(
            prompt=prompt, source_prompt=source_prompt, image=init_image, num_inference_steps=100, eta=0.1, strength=0.85, guidance_scale=3, source_guidance_scale=1, generator=generator, output_type="np"
        )
        image = output.images

        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image).max() < 5e-1

    def test_cycle_diffusion_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, safety_checker=None)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"

        generator = torch.manual_seed(0)

        output = pipe(
            prompt=prompt, source_prompt=source_prompt, image=init_image, num_inference_steps=100, eta=0.1, strength=0.85, guidance_scale=3, source_guidance_scale=1, generator=generator, output_type="np"
        )
        image = output.images

        assert np.abs(image - expected_image).max() < 2e-2
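
def _example_cycle_diffusion_call():
    """Illustrative end-to-end usage of the pipeline exercised above; not run
    as part of the test suite (it downloads weights and is slow without a GPU)."""
    from diffusers import CycleDiffusionPipeline, DDIMScheduler

    scheduler = DDIMScheduler.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="scheduler")
    pipe = CycleDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", scheduler=scheduler)
    image = ...  # a 512x512 PIL image of the source scene (caller supplies this)
    return pipe(
        prompt="A blue colored car",
        source_prompt="A black colored car",
        image=image,
        num_inference_steps=100,
        eta=0.1,
        strength=0.85,
        guidance_scale=3,
        source_guidance_scale=1,
    ).images[0]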
| 79 | 1 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class DistilBertModelTester(object):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DistilBertConfig(
            vocab_size=self.vocab_size, dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, hidden_dim=self.intermediate_size, hidden_act=self.hidden_act, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range
        )

    def create_and_check_distilbert_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DistilBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DistilBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DistilBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DistilBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DistilBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = DistilBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, labels=choice_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class DistilBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DistilBertModel,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DistilBertModel,
            "fill-mask": DistilBertForMaskedLM,
            "question-answering": DistilBertForQuestionAnswering,
            "text-classification": DistilBertForSequenceClassification,
            "token-classification": DistilBertForTokenClassification,
            "zero-shot": DistilBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = True
    test_resize_embeddings = True
    test_resize_position_embeddings = True

    def setUp(self):
        self.model_tester = DistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # DistilBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))


@require_torch
class DistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = DistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]]
        )

        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
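
# To run just this module (path is illustrative, mirroring the transformers
# repository layout):
#
#   python -m pytest tests/models/distilbert/test_modeling_distilbert.py -k "distilbert"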
| 147 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_xmod''': [
'''XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XmodConfig''',
'''XmodOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xmod"] = [
'''XMOD_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XmodForCausalLM''',
'''XmodForMaskedLM''',
'''XmodForMultipleChoice''',
'''XmodForQuestionAnswering''',
'''XmodForSequenceClassification''',
'''XmodForTokenClassification''',
'''XmodModel''',
'''XmodPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
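
# Usage note (illustrative): with the lazy structure above, `import
# transformers.models.xmod` stays lightweight; the torch-dependent classes in
# `_import_structure["modeling_xmod"]` are only imported when an attribute such
# as `XmodModel` is actually accessed.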
| 147 | 1 |
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


PREFIX = "https://openaipublic.azureedge.net/jukebox/models/"
MODEL_MAPPING = {
"jukebox-1b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"1b_lyrics/prior_level_2.pth.tar",
],
"jukebox-5b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"5b_lyrics/prior_level_2.pth.tar",
],
}
def replace_key(key):
    if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.1.bias", ".conv1d_1.bias")
    elif key.endswith(".model.1.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.1.weight", ".conv1d_1.weight")
    elif key.endswith(".model.3.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.3.bias", ".conv1d_2.bias")
    elif key.endswith(".model.3.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.3.weight", ".conv1d_2.weight")

    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0", "conditioner_blocks")

    if "prime_prior" in key:
        key = key.replace("prime_prior", "encoder")

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb.", ".")

    if key.endswith("k"):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k", ".codebook")
    if "y_emb." in key:
        return key.replace("y_emb.", "metadata_embedding.")

    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb", "embed_tokens")

    if "prime_state_ln" in key:
        return key.replace("prime_state_ln", "encoder.final_layer_norm")
    if ".ln" in key:
        return key.replace(".ln", ".layer_norm")
    if "_ln" in key:
        return key.replace("_ln", "_layer_norm")

    if "prime_state_proj" in key:
        return key.replace("prime_state_proj", "encoder.proj_in")
    if "prime_x_out" in key:
        return key.replace("prime_x_out", "encoder.lm_head")
    if "prior.x_out" in key:
        return key.replace("x_out", "fc_proj_out")
    if "x_emb" in key:
        return key.replace("x_emb", "embed_tokens")

    return key
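
# Hand-worked examples of `replace_key` (key shapes are illustrative):
#
#   "vqvae.bottleneck.level_blocks.0.k"  -> ends with "k"      -> "vqvae.bottleneck.level_blocks.0.codebook"
#   "prior.prime_state_ln.weight"        -> prime_state_ln     -> "prior.encoder.final_layer_norm.weight"
#   "prior.x_out.weight"                 -> prior.x_out        -> "prior.fc_proj_out.weight"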
def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_encoder_block_resnet = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_decoder_block_resnet = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)")
    re_prior_cond_resnet = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)")

    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)

        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)

        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)

        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)

        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)

        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)

        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)

        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)

        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)

        # keep original key
        else:
            key = original_key

        key = replace_key(key)

        if f"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(f"failed converting {original_key} to {key}, does not match")

        # handle missmatched shape
        elif value.shape != model_state_dict[f"{key_prefix}.{key}"].shape:
            val = model_state_dict[f"{key_prefix}.{key}"]
            print(f"{original_key}-> {key} : \nshape {val.shape} and {value.shape}, do not match")
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict
@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    """Download the original Jukebox checkpoints and convert them to the transformers layout."""
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}"):
            r = requests.get(f"{PREFIX}{file}", allow_redirects=True)
            os.makedirs(f"{pytorch_dump_folder_path}/", exist_ok=True)
            open(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}", "wb").write(r.content)

    model_to_convert = MODEL_MAPPING[model_name.split("/")[-1]]

    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f"{pytorch_dump_folder_path}/{dict_name.split('/')[-1]}")["model"]

        new_dic = {}
        for k in old_dic.keys():
            if k.endswith(".b"):
                new_dic[k.replace("b", "bias")] = old_dic[k]
            elif k.endswith(".w"):
                new_dic[k.replace("w", "weight")] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace(".blocks.", ".model.")] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        key_prefix = "vqvae" if i == 0 else f"priors.{3 - i}"
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)

    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f"{pytorch_dump_folder_path}/mapping.json", "w") as txtfile:
        json.dump(mapping, txtfile)

    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    return weight_dict
if __name__ == "__main__":
a_ :List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="jukebox-5b-lyrics",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="jukebox-5b-lyrics-converted",
type=str,
help="Path to the output PyTorch model directory.",
)
a_ :str = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
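# Illustrative invocation (the script filename is hypothetical; the checkpoint
# shards are downloaded on the first run, using the defaults declared above):
#   python convert_jukebox.py --model_name jukebox-5b-lyrics \
#       --pytorch_dump_folder_path jukebox-5b-lyrics-converted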
| 277 |
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = LxmertConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = LxmertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
a_ :Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
a_ :Optional[int] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
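# Illustrative invocation (file names are hypothetical placeholders):
#   python convert_lxmert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./model.ckpt --config_file ./config.json \
#       --pytorch_dump_path ./pytorch_model.bin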
| 277 | 1 |
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class XLMProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMProphetNetTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "[PAD]"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "[PAD]")
        self.assertEqual(vocab_keys[1], "[CLS]")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1_012)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_012)
    def test_full_tokenizer(self):
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"[UNK]",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"[UNK]",
".",
] , )
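    # Note on the id arithmetic above (added for clarity): the tokenizer shifts every
    # raw SentencePiece id by `fairseq_offset` so that the lowest ids stay reserved
    # for special tokens, and pieces missing from the small fixture vocab ("9", "é")
    # come back as "[UNK]" in the id-to-token round trip above.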
    @cached_property
    def big_tokenizer(self):
        return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased")
@slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [35_389, 6_672, 49, 2]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
def UpperCAmelCase_ ( self )-> List[str]:
# fmt: off
UpperCamelCase_ = {"input_ids": [[11_073, 82_783, 18, 26, 82_783, 549, 51_540, 248, 17_209, 1_301, 217, 20, 215_186, 1_325, 147, 17_209, 1_301, 217, 20, 56_370, 53, 122_020, 20, 16_477, 27, 87_355, 4_548, 20, 4_728, 78_392, 17, 159_969, 18, 26, 24_491, 629, 15, 538, 22_704, 5_439, 15, 2_788, 24_491, 9_885, 15, 43_534, 605, 15, 814, 18_403, 33_200, 29, 15, 43_534, 24_458, 12_410, 111, 24_966, 83_669, 9_637, 144_068, 26, 850, 22_346, 27, 147, 24_966, 83_669, 83_490, 26, 39_113, 735, 27, 689, 656, 2_800, 1_339, 4_600, 53, 122_020, 115_785, 34, 816, 1_339, 46_887, 18, 147, 53_905, 1_951, 42_238, 41_170, 17_732, 834, 436, 15, 27_523, 98_733, 217, 147, 5_542, 4_981, 930, 17_347, 16, 2], [20_091, 629, 94, 82_786, 58, 490, 20, 1_528, 84, 53_905, 344, 80_592, 110_128, 18_822, 5_267, 1_306, 62, 152_537, 308, 7_997, 401, 124_427, 549, 35_442, 225, 109, 15_055, 25_748, 147, 7_119, 43_712, 34, 767, 135_366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63_784, 119_466, 17, 147_808, 88_214, 18, 656, 81, 32, 3_296, 10_280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowercase , model_name="microsoft/xprophetnet-large-wiki100-cased" , revision="1acad1643ddd54a44df6a1b797ada8373685d90e" , )
| 60 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XGLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(len(vocab_keys), 1_008)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_008)
    def test_full_tokenizer(self):
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
    @cached_property
    def big_tokenizer(self):
        return XGLMTokenizer.from_pretrained("facebook/xglm-564M")
    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XGLMTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
@slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [2, 31_227, 4_447, 35]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [2, 1_018, 67, 11, 1_988, 2_617, 5_631, 278, 11, 3_407, 48, 71_630, 28_085, 4, 3_234, 157, 13, 6, 5, 6, 4, 3_526, 768, 15, 659, 57, 298, 3_983, 864, 129, 21, 6, 5, 13_675, 377, 652, 7_580, 10_341, 155, 2_817, 422, 1_666, 7, 1_674, 53, 113, 202_277, 17_892, 33, 60, 87, 4, 3_234, 157, 61, 2_667, 52_376, 19, 88, 23, 735]
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
def UpperCAmelCase_ ( self )-> Union[str, Any]:
# fmt: off
UpperCamelCase_ = {
"input_ids": [[2, 108_825, 1_163, 15, 88_010, 473, 15_898, 157, 13_672, 1_857, 312, 8, 238_021, 1_163, 53, 13_672, 1_857, 312, 8, 53_283, 182_396, 8, 18_566, 16, 36_733, 4_101, 8, 230, 244_017, 122_553, 7, 15, 132_597, 4, 293, 12_511, 7_610, 4, 3_414, 132_597, 9, 4, 32_361, 362, 4, 734, 28_512, 32_569, 18, 4, 32_361, 26_096, 14_982, 73, 18_715, 21_433, 235_261, 15, 492, 12_427, 16, 53, 18_715, 21_433, 65_454, 15, 23_659, 563, 16, 278, 597, 2_843, 595, 7_931, 182_396, 64_186, 22, 886, 595, 132_981, 53, 25_540, 3_449, 43_982, 39_901, 5_951, 878, 330, 4, 27_694, 80_269, 312, 53, 6_517, 11_780, 611, 20_408, 5], [2, 6, 132_597, 67, 42_897, 33, 592, 8, 163_729, 25_540, 361, 136_997, 109_514, 173_230, 7, 501, 60, 102_913, 196, 5_631, 235, 63_243, 473, 6, 231_757, 74, 5_277, 7_905, 53, 3_095, 37_317, 22, 454, 183_874, 5], [2, 268, 31_298, 46_530, 6, 132_935, 43_831, 7, 597, 32, 24, 3_688, 9_865, 5]],
"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowercase , model_name="facebook/xglm-564M" , padding=_lowercase , )
| 60 | 1 |
'''simple docstring'''
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
    "tokenization_cpmant": ["CpmAntTokenizer"],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_cpmant"] = [
        "CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CpmAntForCausalLM",
        "CpmAntModel",
        "CpmAntPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
    from .tokenization_cpmant import CpmAntTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_cpmant import (
            CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
            CpmAntForCausalLM,
            CpmAntModel,
            CpmAntPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
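# The pattern above is the standard lazy-import idiom: under TYPE_CHECKING the real
# symbols are imported for static analysis, while at runtime the module object is
# replaced by a _LazyModule that resolves names from _import_structure on first
# attribute access, so heavy dependencies (torch) are only imported when needed.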
| 58 |
import math
import qiskit
def quantum_full_adder(
    input_1: int = 1, input_2: int = 1, carry_in: int = 1
) -> qiskit.result.counts.Counts:
    """Simulate a quantum full adder of two input bits plus a carry-in bit.

    An input value of 2 puts the corresponding qubit into superposition.
    """
    if (
        isinstance(input_1, str)
        or isinstance(input_2, str)
        or isinstance(carry_in, str)
    ):
        raise TypeError("inputs must be integers.")
    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError("inputs must be positive.")
    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError("inputs must be exact integers.")
    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError("inputs must be less or equal to 2.")
    # build registers
    qr = qiskit.QuantumRegister(4, "qr")
    cr = qiskit.ClassicalRegister(2, "cr")
    # list the entries
    entry = [input_1, input_2, carry_in]

    quantum_circuit = qiskit.QuantumCircuit(qr, cr)
    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries

    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)
    quantum_circuit.measure([2, 3], cr)  # measure the last two qbits
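    # What the measurement returns (added for clarity): after the gate sequence,
    # qubit 2 holds the sum bit (input_1 XOR input_2 XOR carry_in) and qubit 3 holds
    # the carry-out (the majority of the three inputs). With the default inputs
    # (1, 1, 1) the circuit is fully classical, so every shot yields the state "11".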
    backend = qiskit.Aer.get_backend("aer_simulator")
    job = qiskit.execute(quantum_circuit, backend, shots=1000)

    return job.result().get_counts(quantum_circuit)


if __name__ == "__main__":
    print(f"Total sum count for state is: {quantum_full_adder(1, 1, 1)}")
| 322 | 0 |
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'split_dict' , [
SplitDict(),
SplitDict({'train': SplitInfo(name='train' , num_bytes=1_337 , num_examples=42 , dataset_name='my_dataset' )} ),
SplitDict({'train': SplitInfo(name='train' , num_bytes=1_337 , num_examples=42 )} ),
SplitDict({'train': SplitInfo()} ),
] , )
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded


@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")]
)
def test_split_dict_asdict_has_dataset_name(split_info):
    # the "dataset_name" field is kept in the plain-dict form for backward compatibility
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 355 |
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
        use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32,
        num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
        type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3,
        num_choices=4, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return LlamaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = LlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = LlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask,
    ):
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next tokens and extend next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and attention mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class LlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": LlamaModel,
            "text-classification": LlamaForSequenceClassification,
            "text-generation": LlamaForCausalLM,
            "zero-shot": LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp(self):
        self.model_tester = LlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    @unittest.skip("LLaMA buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass
    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = LlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = LlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
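    # Background (added for clarity, not part of the original test): "linear" RoPE
    # scaling divides the rotary position index by `factor` for every input, so even
    # short sequences see different embeddings from the unscaled model, while
    # "dynamic" NTK-style scaling only rescales the rotary base once the input grows
    # past the original max_position_embeddings -- which is what the branch above asserts.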
@require_torch
class LlamaIntegrationTest(unittest.TestCase):
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_7b_logits(self):
        input_ids = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf", device_map="auto")
        out = model(torch.tensor([input_ids]))
        # Expected mean on dim = -1
        expected_mean = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]])
        torch.testing.assert_close(out.mean(-1), expected_mean, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        expected_slice = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], expected_slice, atol=1e-5, rtol=1e-5)
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13b_logits(self):
        input_ids = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        expected_mean = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]])
        torch.testing.assert_close(out.mean(-1), expected_mean, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        expected_slice = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], expected_slice, atol=1e-5, rtol=1e-5)
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13bf_logits(self):
        input_ids = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        expected_mean = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]])
        torch.testing.assert_close(out.mean(-1), expected_mean, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        expected_slice = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513])
        # fmt: on
        torch.testing.assert_close(out.mean(-1), expected_mean, atol=1e-2, rtol=1e-2)
    @unittest.skip(
        "Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test"
    )
    @slow
    def test_model_70b_logits(self):
        input_ids = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf", device_map="auto")
        out = model(torch.tensor(input_ids))

        expected_mean = torch.tensor(
            [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]], dtype=torch.float32
        )
        torch.testing.assert_close(out.mean(-1), expected_mean, atol=1e-2, rtol=1e-2)
        # fmt: off
        expected_slice = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], expected_slice, atol=1e-5, rtol=1e-5)
    @unittest.skip("Model is currently gated")
    @slow
    def test_model_13b_greedy_generation(self):
        EXPECTED_TEXT_COMPLETION = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
        prompt = "Simply put, the theory of relativity states that "
        tokenizer = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf")
        input_ids = tokenizer.encode(prompt, return_tensors="pt")
        model = LlamaForCausalLM.from_pretrained(
            "meta-llama/Llama-2-13b-chat-hf", device_map="sequential", use_safetensors=False
        )

        # greedy generation outputs
        generated_ids = model.generate(input_ids, max_new_tokens=64, top_p=None, temperature=1, do_sample=False)
        text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
| 110 | 0 |
def solution(limit: int = 28123) -> int:
    """
    Project Euler problem 23: return the sum of all positive integers up to `limit`
    that cannot be written as the sum of two abundant numbers.
    """
    # sum_divs[n] accumulates the sum of the proper divisors of n
    sum_divs = [1] * (limit + 1)

    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i
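    # Example (added for clarity): after the sieve, sum_divs[12] == 1 + 2 + 3 + 4 + 6
    # == 16 > 12, so 12 is recorded below as the smallest abundant number.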
    abundants = set()
    res = 0

    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)

        if not any((n - a in abundants) for a in abundants):
            res += n

    return res


if __name__ == "__main__":
    print(solution())
| 152 |
from typing import List, Optional, Union

import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi

from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging


logger = logging.get_logger(__name__)


class Speech2TextFeatureExtractor(SequenceFeatureExtractor):
    r"""
    Constructs a Speech2Text feature extractor: extracts log-mel filter-bank features
    from raw speech and optionally applies utterance-level cepstral mean and variance
    normalization (CMVN).
    """

    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        num_mel_bins=80,
        padding_value=0.0,
        do_ceptral_normalize=True,
        normalize_means=True,
        normalize_vars=True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.return_attention_mask = True
    def _extract_fbank_features(
        self,
        waveform: np.ndarray,
    ) -> np.ndarray:
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform).unsqueeze(0)
        features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
        return features.numpy()
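    # Note (added for clarity): Kaldi-style feature extraction expects 16-bit PCM
    # amplitudes, so the float waveform in [-1, 1) is scaled by 2**15 above before
    # the filter-bank computation.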
    @staticmethod
    def utterance_cmvn(
        x: np.ndarray,
        input_length: int,
        normalize_means: Optional[bool] = True,
        normalize_vars: Optional[bool] = True,
        padding_value: float = 0.0,
    ) -> np.ndarray:
        # make sure we normalize float32 arrays
        if normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)
        return x
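    # In effect this computes x' = (x - mean(x)) / std(x) over the unpadded frames of
    # each utterance, then overwrites the padded frames with `padding_value`.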
    def normalize(
        self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None
    ) -> List[np.ndarray]:
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value)
            for x, n in zip(input_features, lengths)
        ]
    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_fbank_features(waveform) for waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
| 152 | 1 |
"""simple docstring"""
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class GeneratorDatasetInputStream(AbstractDatasetInputStream):
    def __init__(
        self,
        generator: Callable,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        gen_kwargs: Optional[dict] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.builder = Generator(
            cache_dir=cache_dir,
            features=features,
            generator=generator,
            gen_kwargs=gen_kwargs,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split="train")
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
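# Minimal usage sketch (added for illustration; mirrors Dataset.from_generator):
#
#   def gen():
#       yield {"text": "hello"}
#       yield {"text": "world"}
#
#   dataset = GeneratorDatasetInputStream(generator=gen).read()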
| 150 |
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class PerceiverTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()
        tokenizer = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)
    @cached_property
    def perceiver_tokenizer(self):
        return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")
    def get_tokenizer(self, **kwargs) -> PerceiverTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for Perceiver because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
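    # Why the filtering above is needed (illustrative): a lone UTF-8 continuation
    # byte is undecodable on its own, e.g. bytes([128]).decode("utf-8") raises
    # UnicodeDecodeError, so not every single Perceiver id round-trips through decode().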
    def test_multibytes_char(self):
        tokenizer = self.perceiver_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]Unicode €.[SEP]")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]e è é ê ë[SEP]")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "[CLS]e è é ê ë[SEP]")
    def test_prepare_batch_integration(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 38), batch.input_ids.shape)
        self.assertEqual((2, 38), batch.attention_mask.shape)
    def test_empty_target_text(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)
    def test_max_length_integration(self):
        tokenizer = self.perceiver_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])
    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir, additional_special_tokens=new_added_tokens,
                )

                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )
    def test_decode_invalid_byte_id(self):
        tokenizer = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([178]), "�")
    # tokenizer can be instantiated without any pretrained files, so no need for pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on whole input string
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass
'''simple docstring'''
# The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
# strings and special added tokens as tokens
lowercase__ : Optional[int]= self.get_tokenizers(fast=snake_case__ , do_lower_case=snake_case__ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
lowercase__ : Optional[int]= ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
lowercase__ : Optional[int]= tokenizer.convert_tokens_to_string(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
| 150 | 1 |
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args(unknown_args):
    """Convert a flat list of ["--key", "value", ...] tokens into a kwargs dict."""
    return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}


def main():
    parser = ArgumentParser(
        "HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False
    )
    commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
    set_verbosity_info()

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)

    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown_args)

    # Run
    service = args.func(args, **kwargs)
    service.run()


if __name__ == "__main__":
    main()
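# Illustrative sketch (not part of the original file): `parse_unknown_args`
# simply pairs up alternating flag/value tokens, so a hypothetical call like
#
#     parse_unknown_args(["--num_proc", "8", "--split", "train"])
#
# returns {"num_proc": "8", "split": "train"}. Values stay strings; the
# receiving subcommand is expected to cast them as needed.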
| 51 |
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"


class ByTaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ByTaTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()
        tokenizer = ByTaTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def ta_base_tokenizer(self):
        return ByTaTokenizer.from_pretrained("google/byt5-small")

    def get_tokenizer(self, **kwargs) -> ByTaTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # collect the ids that decode to simple ASCII strings and round-trip through encode
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def test_eos_treatment(self):
        tokenizer = self.ta_base_tokenizer
        batch_with_eos_added = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"])
        batch_without_eos_added = tokenizer(["hi", "I went to the gym", ""])
        self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"])
    def test_multibytes_char(self):
        tokenizer = self.ta_base_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "Unicode €.</s>")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "e è é ê ë</s>")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "e è é ê ë</s>")
    def test_prepare_batch_integration(self):
        tokenizer = self.ta_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 37), batch.input_ids.shape)
        self.assertEqual((2, 37), batch.attention_mask.shape)
    def test_empty_target_text(self):
        tokenizer = self.ta_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)
    def test_max_length_integration(self):
        tokenizer = self.ta_base_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])
    def test_eos_in_input(self):
        tokenizer = self.ta_base_tokenizer
        src_text = ["A long paragraph for summarization. </s>"]
        tgt_text = ["Summary of the text. </s>"]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
        expected_tgt_tokens = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
        # fmt: on
        batch = tokenizer(src_text, text_target=tgt_text)

        self.assertEqual(expected_src_tokens, batch["input_ids"][0])
        self.assertEqual(expected_tgt_tokens, batch["labels"][0])
    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(tmp_dir)
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir,
                    additional_special_tokens=new_added_tokens,
                )
                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )
    def test_decode_single_bytes(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)
                tokenizer = tokenizer_class.from_pretrained(tmp_dir)
                self.assertTrue(tokenizer.decode([255]) == "")
    # tokenizer can be instantiated without any pretrained files, so no need for pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on whole input string
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass
"""simple docstring"""
UpperCAmelCase_ = self.get_tokenizers(fast=_snake_case , do_lower_case=_snake_case)
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}"""):
UpperCAmelCase_ = ['''t''', '''h''', '''i''', '''s''', ''' ''', '''i''', '''s''', ''' ''', '''a''', ''' ''', '''t''', '''e''', '''x''', '''t''', '''</s>''']
UpperCAmelCase_ = tokenizer.convert_tokens_to_string(_snake_case)
self.assertIsInstance(_snake_case , _snake_case)
    # We need a different implementation of the test of the same name defined in TokenizerTesterMixin because
    # this tokenizer doesn't have a vocabulary
    def test_tokenizers_common_ids_setters(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                attributes_list = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]

                token_id_to_test_setters = 0
                token_to_test_setters = tokenizer.convert_ids_to_tokens(
                    token_id_to_test_setters, skip_special_tokens=False
                )

                for attr in attributes_list:
                    setattr(tokenizer, attr + "_id", None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), None)

                    setattr(tokenizer, attr + "_id", token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters)

                setattr(tokenizer, "additional_special_tokens_ids", [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [])

                setattr(tokenizer, "additional_special_tokens_ids", [token_id_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [token_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [token_id_to_test_setters])
| 51 | 1 |
"""simple docstring"""
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class TvltFeatureExtractor(SequenceFeatureExtractor):
    """Extracts padded log-mel spectrogram patches from raw audio for TVLT-style models."""

    model_input_names = ["audio_values", "audio_mask"]

    def __init__(
        self,
        spectrogram_length=2048,
        num_channels=1,
        patch_size=[16, 16],
        feature_size=128,
        sampling_rate=44100,
        hop_length_to_sampling_rate=86,
        n_fft=2048,
        padding_value=0.0,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            **kwargs,
        )

        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=22050.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        ).T
    def _np_extract_fbank_features(self, waveform):
        """Compute a log-mel spectrogram and normalize it to roughly [-1, 1]."""
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters.T,
            log_mel="dB",
            db_range=80.0,
        )
        log_spec = log_spec[:, :-1]  # drop the last frame
        log_spec = log_spec - 20.0  # shift the dB range
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0  # rescale into [-1, 1]
        return log_spec
    def __call__(
        self,
        raw_speech,
        return_tensors=None,
        return_attention_mask=True,
        sampling_rate=None,
        resample=False,
        mask_audio=False,
        **kwargs,
    ) -> BatchFeature:
        # `resample` and `mask_audio` are accepted for API compatibility; they are not used here
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    f" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
                    f" with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length]
            for waveform in raw_speech
        ]
        if isinstance(audio_features[0], List):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]

        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)

        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, 0, : feature.shape[0], :] = feature

        # return as BatchFeature
        if return_attention_mask:
            data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            data = {"audio_values": padded_audio_features}

        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
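# Minimal usage sketch (an assumption for illustration, not from the source):
# extract padded log-mel patches and their attention mask from one second of
# silence at the extractor's default sampling rate.
#
#     import numpy as np
#     extractor = TvltFeatureExtractor()
#     waveform = np.zeros(44100, dtype=np.float32)
#     features = extractor(waveform, sampling_rate=44100, return_tensors="np")
#     features["audio_values"].shape  # (batch, num_channels, time, feature_size)
#     features["audio_mask"].shape    # (batch, num_patches)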
| 291 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ["""BloomTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bloom"] = [
        "BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BloomForCausalLM",
        "BloomModel",
        "BloomPreTrainedModel",
        "BloomForSequenceClassification",
        "BloomForTokenClassification",
        "BloomForQuestionAnswering",
    ]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
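# Hedged illustration (not part of the module): with the `_LazyModule` pattern,
# importing the package is cheap, and torch-backed symbols are only resolved on
# first attribute access, e.g.
#
#     from transformers.models import bloom   # no torch import happens yet
#     model_cls = bloom.BloomForCausalLM      # this attribute access triggers it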
| 291 | 1 |
def A(number: int) -> int:
    """
    Count the set bits (1s) in the binary representation of a non-negative
    integer using Brian Kernighan's algorithm.

    >>> A(25)  # bin(25) == '0b11001'
    3
    >>> A(0)
    0
    """
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")

    count = 0
    while number:
        # `number & (number - 1)` clears the lowest set bit, so we arrive at the
        # next set bit (next 1) instead of looping through each bit position;
        # the loop runs once per set bit rather than 32 times
        number &= number - 1
        count += 1
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
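# Hedged cross-check (illustrative only): the result should agree with
# Python's built-in string counting for any non-negative integer.
#
#     for n in (0, 1, 25, 37, 2**31 - 1):
#         assert A(n) == bin(n).count("1")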
| 48 |
def naive_pattern_search(s: str, pattern: str) -> list:
    """Return every index of `s` at which `pattern` starts, by brute force."""
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position


if __name__ == "__main__":
    assert naive_pattern_search("ABCDEFG", "DE") == [3]
    print(naive_pattern_search("ABAAABCDBBABCDDEBCABC", "ABC"))  # [4, 10, 18]
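# Illustrative note (not from the source): the scan advances one character at a
# time, so overlapping occurrences are all reported, at a worst-case cost of
# O(len(s) * len(pattern)).
#
#     naive_pattern_search("AAAA", "AA")  # -> [0, 1, 2]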
| 48 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swinv2-tiny-patch4-window8-256": (
        "https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
    ),
}


class Swinv2Config(PretrainedConfig):
    model_type = "swinv2"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
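# Hedged sketch (illustrative, not from the source): the derived `hidden_size`
# doubles the embedding dimension once per stage after the first.
#
#     config = Swinv2Config(embed_dim=96, depths=[2, 2, 6, 2])
#     assert config.hidden_size == 96 * 2 ** 3  # 768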
| 79 |
"""simple docstring"""
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_CITATION = '''\
@inproceedings{snover-etal-2006-study,
title = "A Study of Translation Edit Rate with Targeted Human Annotation",
author = "Snover, Matthew and
Dorr, Bonnie and
Schwartz, Rich and
Micciulla, Linnea and
Makhoul, John",
booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",
month = aug # " 8-12",
year = "2006",
address = "Cambridge, Massachusetts, USA",
publisher = "Association for Machine Translation in the Americas",
url = "https://aclanthology.org/2006.amta-papers.25",
pages = "223--231",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
_DESCRIPTION = '''\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
'''
_KWARGS_DESCRIPTION = '''
Produces TER scores alongside the number of edits and reference length.
Args:
predictions (list of str): The system stream (a sequence of segments).
references (list of list of str): A list of one or more reference streams (each a sequence of segments).
normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
Only applies if `normalized = True`. Defaults to `False`.
case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
\'score\' (float): TER score (num_edits / sum_ref_lengths * 100)
\'num_edits\' (int): The cumulative number of edits
\'ref_length\' (float): The cumulative average reference length
Examples:
Example 1:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}
Example 2:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}
Example 3:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... normalized=True,
... case_sensitive=True)
>>> print(results)
{\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}
Example 4:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}
Example 5:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Ter(datasets.Metric):
    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="http://www.cs.umd.edu/~snover/tercom/",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"],
            reference_urls=[
                "https://github.com/jhclark/tercom",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        normalized=False,
        ignore_punct=False,
        support_zh_ja_chars=False,
        case_sensitive=False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)
        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 79 | 1 |
# flake8: noqa
# Lint as: python3
__all__ = [
'''VerificationMode''',
'''Version''',
'''disable_progress_bar''',
'''enable_progress_bar''',
'''is_progress_bar_enabled''',
'''experimental''',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
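# Hedged note (illustrative): with `__all__` defined above, a star import such
# as `from datasets.utils import *` exposes exactly the listed names and
# nothing else from this package.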
| 5 |
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=8,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=5,
        num_attention_heads=2,
        intermediate_size=36,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MraConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = MraModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()

    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="MRA does not output attentions")
    def test_attention_outputs(self):
        return
@require_torch
class MraModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265
        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3")
        input_ids = torch.arange(4096).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265
        expected_shape = torch.Size((1, 4096, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 330 | 0 |
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class TFDistilBertModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = False
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDistilBertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFDistilBertForMultipleChoice(config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDistilBertForTokenClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDistilBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDistilBertModel,
            TFDistilBertForMaskedLM,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertForMultipleChoice,
        )
        if is_tf_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDistilBertModel,
            "fill-mask": TFDistilBertForMaskedLM,
            "question-answering": TFDistilBertForQuestionAnswering,
            "text-classification": TFDistilBertForSequenceClassification,
            "token-classification": TFDistilBertForTokenClassification,
            "zero-shot": TFDistilBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]):
            model = TFDistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [0.19261885, -0.13732955, 0.4119799],
                    [0.22150156, -0.07422661, 0.39037204],
                    [0.22756018, -0.0896414, 0.3701467],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 366 |
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        # the tester hardcodes its configuration and ignores the arguments above
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
def __A ( self : Optional[int] ):
A_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A_ = None
if self.use_input_mask:
A_ = random_attention_mask([self.batch_size, self.seq_length] )
A_ = None
if self.use_token_type_ids:
A_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A_ = None
A_ = None
A_ = None
if self.use_labels:
A_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A_ = ids_tensor([self.batch_size] , self.num_choices )
A_ = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=UpperCAmelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __A ( self : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : int , UpperCAmelCase : Optional[int] , UpperCAmelCase : int , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : int ):
A_ = TFConvBertModel(config=UpperCAmelCase )
A_ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
A_ = [input_ids, input_mask]
A_ = model(UpperCAmelCase )
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : str , UpperCAmelCase : Tuple ):
A_ = TFConvBertForMaskedLM(config=UpperCAmelCase )
A_ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self : Dict , UpperCAmelCase : Any , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Any , UpperCAmelCase : int ):
A_ = self.num_labels
A_ = TFConvBertForSequenceClassification(config=UpperCAmelCase )
A_ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self : Any , UpperCAmelCase : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] , UpperCAmelCase : str ):
A_ = self.num_choices
A_ = TFConvBertForMultipleChoice(config=UpperCAmelCase )
A_ = tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
A_ = tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
A_ = tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
A_ = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __A ( self : Optional[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : str , UpperCAmelCase : Any , UpperCAmelCase : str ):
A_ = self.num_labels
A_ = TFConvBertForTokenClassification(config=UpperCAmelCase )
A_ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __A ( self : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : str ):
A_ = TFConvBertForQuestionAnswering(config=UpperCAmelCase )
A_ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __A ( self : List[str] ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class _a ( snake_case_ , snake_case_ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
_lowerCamelCase : Any = (
{
'feature-extraction': TFConvBertModel,
'fill-mask': TFConvBertForMaskedLM,
'question-answering': TFConvBertForQuestionAnswering,
'text-classification': TFConvBertForSequenceClassification,
'token-classification': TFConvBertForTokenClassification,
'zero-shot': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
_lowerCamelCase : Dict = False
_lowerCamelCase : Optional[int] = False
_lowerCamelCase : Dict = False
def __A ( self : List[str] ):
A_ = TFConvBertModelTester(self )
A_ = ConfigTester(self , config_class=UpperCAmelCase , hidden_size=37 )
def __A ( self : Tuple ):
self.config_tester.run_common_tests()
def __A ( self : Tuple ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def __A ( self : Dict ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase )
def __A ( self : List[Any] ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase )
def __A ( self : Dict ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase )
def __A ( self : int ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase )
def __A ( self : List[Any] ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase )
@slow
def __A ( self : str ):
A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common()
A_ = True
A_ = True
if hasattr(UpperCAmelCase , "use_cache" ):
A_ = True
A_ = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
A_ = getattr(self.model_tester , "key_length" , UpperCAmelCase )
for model_class in self.all_model_classes:
A_ = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase )
A_ = model_class(UpperCAmelCase )
A_ = len(model(UpperCAmelCase ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCAmelCase , saved_model=UpperCAmelCase )
A_ = os.path.join(UpperCAmelCase , "saved_model" , "1" )
A_ = tf.keras.models.load_model(UpperCAmelCase )
A_ = model(UpperCAmelCase )
if self.is_encoder_decoder:
A_ = outputs["encoder_hidden_states"]
A_ = outputs["encoder_attentions"]
else:
A_ = outputs["hidden_states"]
A_ = outputs["attentions"]
self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase )
A_ = getattr(
self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def __A ( self : List[str] ):
A_ = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
self.assertIsNotNone(UpperCAmelCase )
def __A ( self : Any ):
A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common()
A_ = True
A_ = getattr(self.model_tester , "decoder_seq_length" , self.model_tester.seq_length )
A_ = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
A_ = getattr(self.model_tester , "key_length" , UpperCAmelCase )
A_ = getattr(self.model_tester , "key_length" , UpperCAmelCase )
def check_decoder_attentions_output(UpperCAmelCase : Optional[int] ):
A_ = len(UpperCAmelCase )
self.assertEqual(out_len % 2 , 0 )
A_ = outputs.decoder_attentions
self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(UpperCAmelCase : Optional[Any] ):
A_ = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
A_ = True
A_ = False
A_ = model_class(UpperCAmelCase )
A_ = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
A_ = len(UpperCAmelCase )
self.assertEqual(config.output_hidden_states , UpperCAmelCase )
check_encoder_attentions_output(UpperCAmelCase )
if self.is_encoder_decoder:
A_ = model_class(UpperCAmelCase )
A_ = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
self.assertEqual(config.output_hidden_states , UpperCAmelCase )
check_decoder_attentions_output(UpperCAmelCase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
A_ = True
A_ = model_class(UpperCAmelCase )
A_ = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
self.assertEqual(config.output_hidden_states , UpperCAmelCase )
check_encoder_attentions_output(UpperCAmelCase )
# Check attention is always last and order is fine
A_ = True
A_ = True
A_ = model_class(UpperCAmelCase )
A_ = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(UpperCAmelCase ) )
self.assertEqual(model.config.output_hidden_states , UpperCAmelCase )
check_encoder_attentions_output(UpperCAmelCase )
@require_tf
class _a ( unittest.TestCase ):
"""simple docstring"""
@slow
def __A ( self : Dict ):
A_ = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
A_ = tf.constant([[0, 1, 2, 3, 4, 5]] )
A_ = model(UpperCAmelCase )[0]
A_ = [1, 6, 768]
self.assertEqual(output.shape , UpperCAmelCase )
A_ = tf.constant(
[
[
[-0.03_475_493, -0.4_686_034, -0.30_638_832],
[0.22_637_248, -0.26_988_646, -0.7_423_424],
[0.10_324_868, -0.45_013_508, -0.58_280_784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase , atol=1E-4 ) | 329 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class KarrasVeSchedulerState:
    # setable values
    num_inference_steps: Optional[int] = None
    timesteps: Optional[jnp.ndarray] = None
    schedule: Optional[jnp.ndarray] = None  # sigma(t_i)

    @classmethod
    def create(cls):
        return cls()


@dataclass
class FlaxKarrasVeOutput(BaseOutput):
    """
    Output class for the scheduler's step function.
    """

    prev_sample: jnp.ndarray
    derivative: jnp.ndarray
    state: KarrasVeSchedulerState


class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin):
    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        # all hyperparameters are stored on `self.config` by @register_to_config
        pass

    def create_state(self):
        return KarrasVeSchedulerState.create()

    def set_timesteps(
        self, state: KarrasVeSchedulerState, num_inference_steps: int, shape: Tuple = ()
    ) -> KarrasVeSchedulerState:
        timesteps = jnp.arange(0, num_inference_steps)[::-1].copy()
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]
        return state.replace(
            num_inference_steps=num_inference_steps,
            schedule=jnp.array(schedule, dtype=jnp.float32),
            timesteps=timesteps,
        )

    def add_noise_to_input(self, state: KarrasVeSchedulerState, sample: jnp.ndarray, sigma: float, key):
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(key, num=1)
        eps = self.config.s_noise * random.normal(key=key, shape=sample.shape)

        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat

    def step(self, state, model_output, sigma_hat, sigma_prev, sample_hat, return_dict: bool = True):
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative, state)
        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def step_correct(
        self, state, model_output, sigma_hat, sigma_prev, sample_hat, sample_prev, derivative, return_dict: bool = True
    ):
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative, state)
        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def add_noise(self, state, original_samples, noise, timesteps):
        raise NotImplementedError()
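
# Usage sketch (hedged): one predictor-corrector step with this scheduler. `model_fn` is a
# placeholder for a trained score model supplied by the caller; it is not part of this module,
# and the call pattern is a simplification of the PyTorch KarrasVe pipeline loop.
def _karras_ve_step_sketch(scheduler, state, sample, t, key, model_fn):
    sigma = state.schedule[t]
    sigma_prev = state.schedule[t - 1] if t > 0 else 0
    # 1. stochastic churn: lift the noise level from sigma to sigma_hat
    key, step_key = random.split(key)
    sample_hat, sigma_hat = scheduler.add_noise_to_input(state, sample, sigma, step_key)
    # 2. Euler (1st-order) predictor step from sigma_hat down to sigma_prev
    output = scheduler.step(state, model_fn(sample_hat, sigma_hat), sigma_hat, sigma_prev, sample_hat)
    # 3. Heun (2nd-order) correction, skipped on the final step where sigma_prev == 0
    if sigma_prev != 0:
        output = scheduler.step_correct(
            state,
            model_fn(output.prev_sample, sigma_prev),
            sigma_hat,
            sigma_prev,
            sample_hat,
            output.prev_sample,
            output.derivative,
        )
    return output.prev_sample, key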
| 148 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
logger = logging.get_logger(__name__)


@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        """
        This __init__ exists for legacy code only. Once the deprecated args are removed
        completely, it can simply be deleted.
        """
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no-{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )
        self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
        self.device_idx = kwargs.pop("device_idx", self.device_idx)
        self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
        self.use_xla = kwargs.pop("use_xla", self.use_xla)
        super().__init__(**kwargs)

    tpu_name: str = field(
        default=None,
        metadata={"help": "Name of TPU"},
    )
    device_idx: int = field(
        default=0,
        metadata={"help": "CPU / GPU device index. Defaults to 0."},
    )
    eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager mode."})
    use_xla: bool = field(
        default=False,
        metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_mode` has to be set to `False`."
        },
    )

    @cached_property
    def _setup_tpu(self):
        requires_backends(self, ["tf"])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu

    @cached_property
    def _setup_strategy(self):
        requires_backends(self, ["tf"])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)
            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
                strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}")
            else:
                tf.config.set_visible_devices([], "GPU")  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}")
        return strategy

    @property
    def is_tpu(self) -> bool:
        requires_backends(self, ["tf"])
        return self._setup_tpu is not None

    @property
    def strategy(self):
        requires_backends(self, ["tf"])
        return self._setup_strategy

    @property
    def gpu_list(self):
        requires_backends(self, ["tf"])
        return tf.config.list_physical_devices("GPU")

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["tf"])
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
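
# Usage sketch (hedged): the __init__ above rewrites deprecated negative flags into their
# positive counterparts before the dataclass machinery runs, so these two calls are
# equivalent (`models` is assumed to be a field on the parent BenchmarkArguments):
#
#   args = TensorFlowBenchmarkArguments(models=["bert-base-uncased"], no_cuda=True)  # legacy flag
#   args = TensorFlowBenchmarkArguments(models=["bert-base-uncased"], cuda=False)    # current flag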
| 148 | 1 |
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__A = get_tests_dir('fixtures/test_sentencepiece_no_bos.model')
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE ( snake_case , unittest.TestCase ):
"""simple docstring"""
A_ = PegasusTokenizer
A_ = PegasusTokenizerFast
A_ = True
A_ = True
def __A ( self: str ) -> List[str]:
super().setUp()
# We have a SentencePiece fixture for testing
_A = PegasusTokenizer(__A )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __A ( self: Optional[int] ) -> Optional[int]:
return PegasusTokenizer.from_pretrained('''google/pegasus-large''' )
def __A ( self: Dict , **__A: str ) -> PegasusTokenizer:
return PegasusTokenizer.from_pretrained(self.tmpdirname , **__A )
def __A ( self: int , __A: List[Any] ) -> str:
return ("This is a test", "This is a test")
def __A ( self: Any ) -> Any:
_A = '''</s>'''
_A = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__A ) , __A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__A ) , __A )
def __A ( self: Union[str, Any] ) -> List[Any]:
_A = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<pad>''' )
self.assertEqual(vocab_keys[1] , '''</s>''' )
self.assertEqual(vocab_keys[-1] , '''v''' )
self.assertEqual(len(__A ) , 11_03 )
def __A ( self: Optional[int] ) -> List[str]:
self.assertEqual(self.get_tokenizer().vocab_size , 11_03 )
def __A ( self: List[Any] ) -> List[Any]:
_A = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
_A = self.tokenizer_class.from_pretrained(self.tmpdirname )
_A = (
'''Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'''
''' </s> <pad> <pad> <pad>'''
)
_A = rust_tokenizer([raw_input_str] , return_tensors=__A , add_special_tokens=__A ).input_ids[0]
_A = py_tokenizer([raw_input_str] , return_tensors=__A , add_special_tokens=__A ).input_ids[0]
self.assertListEqual(__A , __A )
def __A ( self: Dict ) -> List[str]:
_A = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
_A = '''<mask_1> To ensure a <mask_2> flow of bank resolutions.'''
_A = [2, 4_13, 6_15, 1_14, 3, 19_71, 1_13, 16_79, 1_07_10, 1_07, 1]
_A = tokenizer([raw_input_str] , return_tensors=__A ).input_ids[0]
self.assertListEqual(__A , __A )
def __A ( self: Union[str, Any] ) -> Tuple:
_A = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_61_03
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 1_03
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 1_05
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 10_24
_A = '''To ensure a smooth flow of bank resolutions.'''
_A = [4_13, 6_15, 1_14, 22_91, 19_71, 1_13, 16_79, 1_07_10, 1_07, 1]
_A = tokenizer([raw_input_str] , return_tensors=__A ).input_ids[0]
self.assertListEqual(__A , __A )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
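    # Note on the id layout asserted above (hedged): PegasusTokenizer shifts each raw
    # SentencePiece id up by `offset` (103 here), leaving ids 0-102 free for specials —
    # <pad>=0, </s>=1, <mask_1>=2, <mask_2>=3, then the reserved <unk_*> slots — which is
    # why the underlying SentencePiece <unk> (id 2) surfaces as tokenizer id 105 (offset + 2).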
@require_torch
def __A ( self: Union[str, Any] ) -> Any:
_A = ['''This is going to be way too long.''' * 1_50, '''short example''']
_A = ['''not super long but more than 5 tokens''', '''tiny''']
_A = self._large_tokenizer(__A , padding=__A , truncation=__A , return_tensors='''pt''' )
_A = self._large_tokenizer(
text_target=__A , max_length=5 , padding=__A , truncation=__A , return_tensors='''pt''' )
assert batch.input_ids.shape == (2, 10_24)
assert batch.attention_mask.shape == (2, 10_24)
assert targets["input_ids"].shape == (2, 5)
assert len(__A ) == 2 # input_ids, attention_mask.
@slow
def __A ( self: List[str] ) -> Union[str, Any]:
# fmt: off
_A = {'''input_ids''': [[3_89_79, 1_43, 1_84_85, 6_06, 1_30, 2_66_69, 8_76_86, 1_21, 5_41_89, 11_29, 1_11, 2_66_69, 8_76_86, 1_21, 91_14, 1_47_87, 1_21, 1_32_49, 1_58, 5_92, 9_56, 1_21, 1_46_21, 3_15_76, 1_43, 6_26_13, 1_08, 96_88, 9_30, 4_34_30, 1_15_62, 6_26_13, 3_04, 1_08, 1_14_43, 8_97, 1_08, 93_14, 1_74_15, 6_33_99, 1_08, 1_14_43, 76_14, 1_83_16, 1_18, 42_84, 71_48, 1_24_30, 1_43, 14_00, 2_57_03, 1_58, 1_11, 42_84, 71_48, 1_17_72, 1_43, 2_12_97, 10_64, 1_58, 1_22, 2_04, 35_06, 17_54, 11_33, 1_47_87, 15_81, 1_15, 3_32_24, 44_82, 1_11, 13_55, 1_10, 2_91_73, 3_17, 5_08_33, 1_08, 2_01_47, 9_46_65, 1_11, 7_71_98, 1_07, 1], [1_10, 6_26_13, 1_17, 6_38, 1_12, 11_33, 1_21, 2_00_98, 13_55, 7_90_50, 1_38_72, 1_35, 15_96, 5_35_41, 13_52, 1_41, 1_30_39, 55_42, 1_24, 3_02, 5_18, 1_11, 2_68, 29_56, 1_15, 1_49, 44_27, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_39, 12_35, 27_99, 1_82_89, 1_77_80, 2_04, 1_09, 94_74, 12_96, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__A , model_name='''google/bigbird-pegasus-large-arxiv''' , revision='''ba85d0851d708441f91440d509690f1ab6353415''' , )
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE ( snake_case , unittest.TestCase ):
"""simple docstring"""
A_ = PegasusTokenizer
A_ = PegasusTokenizerFast
A_ = True
A_ = True
def __A ( self: List[str] ) -> Tuple:
super().setUp()
# We have a SentencePiece fixture for testing
_A = PegasusTokenizer(__A , offset=0 , mask_token_sent=__A , mask_token='''[MASK]''' )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __A ( self: List[str] ) -> str:
return PegasusTokenizer.from_pretrained('''google/bigbird-pegasus-large-arxiv''' )
def __A ( self: Union[str, Any] , **__A: int ) -> PegasusTokenizer:
return PegasusTokenizer.from_pretrained(self.tmpdirname , **__A )
def __A ( self: List[Any] , __A: Union[str, Any] ) -> List[str]:
return ("This is a test", "This is a test")
def __A ( self: List[Any] ) -> List[str]:
_A = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
_A = self.tokenizer_class.from_pretrained(self.tmpdirname )
_A = (
'''Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'''
''' <pad> <pad> <pad>'''
)
_A = rust_tokenizer([raw_input_str] , return_tensors=__A , add_special_tokens=__A ).input_ids[0]
_A = py_tokenizer([raw_input_str] , return_tensors=__A , add_special_tokens=__A ).input_ids[0]
self.assertListEqual(__A , __A )
@require_torch
def __A ( self: List[Any] ) -> Dict:
_A = ['''This is going to be way too long.''' * 10_00, '''short example''']
_A = ['''not super long but more than 5 tokens''', '''tiny''']
_A = self._large_tokenizer(__A , padding=__A , truncation=__A , return_tensors='''pt''' )
_A = self._large_tokenizer(
text_target=__A , max_length=5 , padding=__A , truncation=__A , return_tensors='''pt''' )
assert batch.input_ids.shape == (2, 40_96)
assert batch.attention_mask.shape == (2, 40_96)
assert targets["input_ids"].shape == (2, 5)
assert len(__A ) == 2 # input_ids, attention_mask.
def __A ( self: Any ) -> str:
_A = (
'''This is an example string that is used to test the original TF implementation against the HF'''
''' implementation'''
)
_A = self._large_tokenizer(__A ).input_ids
self.assertListEqual(
__A , [1_82, 1_17, 1_42, 5_87, 42_11, 1_20, 1_17, 2_63, 1_12, 8_04, 1_09, 8_56, 2_50_16, 31_37, 4_64, 1_09, 2_69_55, 31_37, 1] , )
| 75 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    """Builds a FocalNetConfig from a checkpoint name such as "focalnet-tiny-lrf"."""
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )
    return config
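
# Worked example (illustrative; not executed here since the call downloads the id2label file):
#   get_focalnet_config("focalnet-tiny-lrf") yields depths=[2, 2, 6, 2], focal_levels=[3, 3, 3, 3],
#   focal_windows=[3, 3, 3, 3] and embed_dim=96, with the 1k-class ImageNet label mapping.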
def rename_key(name):
    """Maps an original FocalNet parameter name to its Transformers equivalent."""
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name
    return name
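
# Sanity checks for the mapping above: the classification head keeps a top-level name,
# everything else is nested under the `focalnet.` prefix.
#   rename_key("head.weight")              -> "classifier.weight"
#   rename_key("norm.weight")              -> "focalnet.layernorm.weight"
#   rename_key("patch_embed.proj.weight")  -> "focalnet.embeddings.patch_embeddings.projection.weight"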
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """Converts an original FocalNet checkpoint to the Transformers format and verifies the outputs."""
    # fmt: off
    model_name_to_url = {
        "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
        "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
        "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
        "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
        "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
        "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
        "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
        "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
        "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
        "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
    }
    # fmt: on

    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)

    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")

    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)
    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])
    print("First values of logits:", outputs.logits[0, :3])

    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")
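
# Typical invocation (hedged; the script filename is an assumption, not given in this file):
#   python convert_focalnet_to_hf_format.py --model_name focalnet-tiny \
#       --pytorch_dump_folder_path ./focalnet-tiny --push_to_hub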
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='focalnet-tiny',
type=str,
help='Name of the FocalNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub.',
)
args = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 75 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
    CamembertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    },
    "tokenizer_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"


class CamembertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CamembertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        **kwargs,
    ):
        # the mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
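
# Special-token layout produced above (CamemBERT follows the RoBERTa convention):
#   single sequence:   <s> A </s>
#   pair of sequences: <s> A </s></s> B </s>
# create_token_type_ids_from_sequences returns all zeros in both cases because CamemBERT,
# like RoBERTa, does not make use of token type ids.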
| 209 |
'''simple docstring'''
from __future__ import annotations
def kmp(pattern: str, text: str) -> bool:
    """
    Knuth-Morris-Pratt string search: returns True if `pattern` occurs in `text`,
    scanning the text once with the help of the pattern's failure array.
    """
    # 1) Construct the failure array for the pattern
    failure = get_failure_array(pattern)

    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1

        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list[int]:
    """
    Calculates the new index we should go to if we fail a comparison.
    """
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure
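
# Worked example: for pattern "aabaabaaa" the failure array is [0, 1, 0, 1, 2, 3, 4, 5, 2]
# (see Test 5 below). failure[i] is the length of the longest proper prefix of pattern[: i + 1]
# that is also a suffix of it, which is what lets kmp() fall back inside the pattern instead of
# re-scanning the text — giving O(len(text) + len(pattern)) overall.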
if __name__ == "__main__":
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert kmp(pattern, text1) and not kmp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert kmp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert kmp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert kmp(pattern, text)

    # Test 5)
    pattern = "aabaabaaa"
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
| 23 | 0 |
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class __lowerCAmelCase :
def __init__(self , __magic_name__ , __magic_name__=13 , __magic_name__=7 , __magic_name__=True , __magic_name__=True , __magic_name__=True , __magic_name__=True , __magic_name__=99 , __magic_name__=16 , __magic_name__=36 , __magic_name__=6 , __magic_name__=6 , __magic_name__=6 , __magic_name__=37 , __magic_name__="gelu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=512 , __magic_name__=16 , __magic_name__=2 , __magic_name__=0.02 , __magic_name__=3 , __magic_name__=4 , __magic_name__=None , ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Union[str, Any] = parent
snake_case_ : List[str] = batch_size
snake_case_ : Union[str, Any] = seq_length
snake_case_ : Tuple = is_training
snake_case_ : Optional[int] = use_input_mask
snake_case_ : Optional[int] = use_token_type_ids
snake_case_ : Dict = use_labels
snake_case_ : Any = vocab_size
snake_case_ : int = embedding_size
snake_case_ : int = hidden_size
snake_case_ : int = num_hidden_layers
snake_case_ : Tuple = num_hidden_groups
snake_case_ : Union[str, Any] = num_attention_heads
snake_case_ : Optional[Any] = intermediate_size
snake_case_ : int = hidden_act
snake_case_ : int = hidden_dropout_prob
snake_case_ : int = attention_probs_dropout_prob
snake_case_ : Any = max_position_embeddings
snake_case_ : Tuple = type_vocab_size
snake_case_ : Optional[int] = type_sequence_label_size
snake_case_ : str = initializer_range
snake_case_ : Optional[int] = num_labels
snake_case_ : List[Any] = num_choices
snake_case_ : Union[str, Any] = scope
def lowerCamelCase (self ) -> str:
'''simple docstring'''
snake_case_ : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ : Optional[Any] = None
if self.use_input_mask:
snake_case_ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ : Dict = None
if self.use_token_type_ids:
snake_case_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case_ : str = None
snake_case_ : List[str] = None
snake_case_ : Tuple = None
if self.use_labels:
snake_case_ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case_ : Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
snake_case_ : List[str] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> Tuple:
'''simple docstring'''
snake_case_ : int = AlbertModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
snake_case_ : Optional[int] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
snake_case_ : str = model(_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
snake_case_ : Dict = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> List[Any]:
'''simple docstring'''
snake_case_ : Optional[Any] = AlbertForPreTraining(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
snake_case_ : str = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , sentence_order_label=_UpperCAmelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Any = AlbertForMaskedLM(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
snake_case_ : Dict = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> Dict:
'''simple docstring'''
snake_case_ : int = AlbertForQuestionAnswering(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
snake_case_ : Optional[Any] = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> List[str]:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.num_labels
snake_case_ : Any = AlbertForSequenceClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
snake_case_ : Any = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> int:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.num_labels
snake_case_ : List[Any] = AlbertForTokenClassification(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
snake_case_ : str = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> List[Any]:
'''simple docstring'''
snake_case_ : Optional[int] = self.num_choices
snake_case_ : Any = AlbertForMultipleChoice(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
snake_case_ : Dict = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case_ : Union[str, Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case_ : Optional[int] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case_ : int = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCamelCase (self ) -> Optional[int]:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class __lowerCAmelCase ( _UpperCAmelCase, _UpperCAmelCase, unittest.TestCase ):
lowerCamelCase_ : Optional[Any] = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowerCamelCase_ : str = (
{
"feature-extraction": AlbertModel,
"fill-mask": AlbertForMaskedLM,
"question-answering": AlbertForQuestionAnswering,
"text-classification": AlbertForSequenceClassification,
"token-classification": AlbertForTokenClassification,
"zero-shot": AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase_ : Any = True
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__=False ) -> str:
'''simple docstring'''
snake_case_ : Optional[Any] = super()._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
if return_labels:
if model_class in get_values(_UpperCAmelCase ):
snake_case_ : Dict = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_UpperCAmelCase )
snake_case_ : Optional[int] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_UpperCAmelCase )
return inputs_dict
def lowerCamelCase (self ) -> str:
'''simple docstring'''
snake_case_ : Optional[int] = AlbertModelTester(self )
snake_case_ : Optional[int] = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37 )
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase (self ) -> Tuple:
'''simple docstring'''
snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def lowerCamelCase (self ) -> List[str]:
'''simple docstring'''
snake_case_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_UpperCAmelCase )
def lowerCamelCase (self ) -> Tuple:
'''simple docstring'''
snake_case_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase )
def lowerCamelCase (self ) -> List[Any]:
'''simple docstring'''
snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_UpperCAmelCase )
def lowerCamelCase (self ) -> Any:
'''simple docstring'''
snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase )
def lowerCamelCase (self ) -> List[str]:
'''simple docstring'''
snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase )
def lowerCamelCase (self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
snake_case_ : Tuple = type
self.model_tester.create_and_check_model(*_UpperCAmelCase )
@slow
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ : str = AlbertModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
@slow
def lowerCamelCase (self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Union[str, Any] = AlbertModel.from_pretrained('''albert-base-v2''' )
snake_case_ : Dict = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
snake_case_ : Optional[Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
snake_case_ : List[str] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )[0]
snake_case_ : Any = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , _UpperCAmelCase )
snake_case_ : Optional[Any] = torch.tensor(
[[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _UpperCAmelCase , atol=1e-4 ) )
| 352 |
def twos_complement(number: int) -> str:
    """
    Take a negative integer `number` and return its two's-complement binary
    representation as a string prefixed with "0b".
    """
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 279 | 0 |
'''simple docstring'''
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def get_swin_config(swin_name):
    """Builds a SwinConfig from a timm checkpoint name such as "swin_tiny_patch4_window7_224"."""
    config = SwinConfig()
    name_split = swin_name.split("_")

    model_size = name_split[1]
    img_size = int(name_split[4])
    window_size = int(name_split[3][-1])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "in22k" in swin_name:
        num_classes = 21841
    else:
        num_classes = 1000

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config
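
# Worked example (illustrative): "swin_tiny_patch4_window7_224" splits into
# ["swin", "tiny", "patch4", "window7", "224"], so model_size="tiny", window_size=7
# (last character of "window7") and img_size=224 — yielding embed_dim=96,
# depths=(2, 2, 6, 2) and num_heads=(3, 6, 12, 24).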
def rename_key(name):
    """Maps an original timm Swin parameter name to its Transformers equivalent."""
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swin." + name
    return name
def convert_state_dict(orig_state_dict, model):
    """Renames all keys and splits the fused qkv matrices into separate query/key/value weights."""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
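
# Hedged sketch of the slicing convention used above: timm stores query/key/value as a single
# fused parameter of shape (3 * dim, dim), and the three blocks are recovered row-wise. The
# helper below is illustrative only and is not called by the conversion.
def _demo_qkv_split(dim: int = 4):
    fused = torch.randn(3 * dim, dim)
    query = fused[:dim, :]
    key = fused[dim : dim * 2, :]
    value = fused[-dim:, :]
    # stacking the slices back in order reproduces the fused matrix exactly
    assert torch.equal(torch.cat([query, key, value]), fused)
    return query, key, value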
def convert_swin_checkpoint(swin_name, pytorch_dump_folder_path):
    """Converts a timm Swin checkpoint to the Transformers format and checks the logits agree."""
    timm_model = timm.create_model(swin_name, pretrained=True)
    timm_model.eval()

    config = get_swin_config(swin_name)
    model = SwinForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swin_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits
    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swin_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swin_name",
        default="swin_tiny_patch4_window7_224",
        type=str,
        help="Name of the Swin timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    args = parser.parse_args()
    convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
| 85 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_chinese_clip": [
"CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ChineseCLIPConfig",
"ChineseCLIPOnnxConfig",
"ChineseCLIPTextConfig",
"ChineseCLIPVisionConfig",
],
"processing_chinese_clip": ["ChineseCLIPProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_chinese_clip"] = ["ChineseCLIPFeatureExtractor"]
    _import_structure["image_processing_chinese_clip"] = ["ChineseCLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_chinese_clip"] = [
"CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ChineseCLIPModel",
"ChineseCLIPPreTrainedModel",
"ChineseCLIPTextModel",
"ChineseCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
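# A minimal stand-in for the deferred-import behavior that _LazyModule
# provides, using module-level __getattr__ (PEP 562); a sketch for
# illustration, not the transformers class itself.
import importlib

_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}


def __getattr__(name):
    if name in _attr_to_module:
        module = importlib.import_module("." + _attr_to_module[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")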
| 42 | 0 |
'''simple docstring'''
__version__ = "0.18.2"
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
    from .models import (
        AutoencoderKL,
        ControlNetModel,
        ModelMixin,
        PriorTransformer,
        T5FilmDecoder,
        Transformer2DModel,
        UNet1DModel,
        UNet2DConditionModel,
        UNet2DModel,
        UNet3DConditionModel,
        VQModel,
    )
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
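# A minimal sketch of the optional-dependency pattern used throughout this
# file, reduced to a single backend. The dummy class mirrors what the
# .utils.dummy_* modules provide; it is illustrative, not the diffusers code.
try:
    import scipy  # noqa: F401

    _scipy_available = True
except ImportError:
    _scipy_available = False

if not _scipy_available:

    class LMSDiscreteScheduler:
        """Placeholder that fails loudly only when actually instantiated."""

        def __init__(self, *args, **kwargs):
            raise ImportError("LMSDiscreteScheduler requires `scipy`; run `pip install scipy`.")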
| 227 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_blip''': [
'''BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlipConfig''',
'''BlipTextConfig''',
'''BlipVisionConfig''',
],
'''processing_blip''': ['''BlipProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_blip"] = ["BlipImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
'''BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlipModel''',
'''BlipPreTrainedModel''',
'''BlipForConditionalGeneration''',
'''BlipForQuestionAnswering''',
'''BlipVisionModel''',
'''BlipTextModel''',
'''BlipForImageTextRetrieval''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
'''TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFBlipModel''',
'''TFBlipPreTrainedModel''',
'''TFBlipForConditionalGeneration''',
'''TFBlipForQuestionAnswering''',
'''TFBlipVisionModel''',
'''TFBlipTextModel''',
'''TFBlipForImageTextRetrieval''',
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 227 | 1 |
"""simple docstring"""
import random
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
A__ , A__ , A__ = [], [], []
for element in data:
if element < pivot:
less.append(UpperCamelCase__ )
elif element > pivot:
greater.append(UpperCamelCase__ )
else:
equal.append(UpperCamelCase__ )
return less, equal, greater
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
if index >= len(UpperCamelCase__ ) or index < 0:
return None
A__ = items[random.randint(0 , len(UpperCamelCase__ ) - 1 )]
A__ = 0
A__ , A__ , A__ = _partition(UpperCamelCase__ , UpperCamelCase__ )
A__ = len(UpperCamelCase__ )
A__ = len(UpperCamelCase__ )
# index is the pivot
if m <= index < m + count:
return pivot
# must be in smaller
elif m > index:
return quick_select(UpperCamelCase__ , UpperCamelCase__ )
# must be in larger
else:
return quick_select(UpperCamelCase__ , index - (m + count) )
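# Usage sketch: the k-th smallest element without fully sorting.
data = [7, 1, 4, 9, 3]
assert quick_select(data, 0) == 1  # minimum
assert quick_select(data, len(data) // 2) == 4  # median of the sorted order
assert quick_select(data, 99) is None  # out-of-range index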
| 221 |
"""simple docstring"""
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
    """Build an ALBERT PyTorch model from a config file and load TF checkpoint weights."""
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = AlbertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--albert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained ALBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
| 221 | 1 |
'''simple docstring'''
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def get_args() -> argparse.Namespace:
    """Parse the CLI arguments for the complexity-classification fine-tuning run."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_ckpt", type=str, default="microsoft/unixcoder-base-nine")
    parser.add_argument("--num_epochs", type=int, default=5)
    parser.add_argument("--batch_size", type=int, default=6)
    parser.add_argument("--gradient_accumulation_steps", type=int, default=1)
    parser.add_argument("--freeze", type=bool, default=True)
    parser.add_argument("--learning_rate", type=float, default=5e-4)
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--lr_scheduler_type", type=str, default="cosine")
    parser.add_argument("--num_warmup_steps", type=int, default=10)
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--output_dir", type=str, default="./results")
    return parser.parse_args()


metric = load("accuracy")


def compute_metrics(eval_pred):
    """Accuracy over argmax class predictions."""
    predictions, labels = eval_pred
    predictions = np.argmax(predictions, axis=1)
    return metric.compute(predictions=predictions, references=labels)


class CustomCallback(TrainerCallback):
    """Also evaluate on the training set whenever an evaluation is scheduled."""

    def __init__(self, trainer) -> None:
        super().__init__()
        self._trainer = trainer

    def on_epoch_end(self, args, state, control, **kwargs):
        if control.should_evaluate:
            control_copy = deepcopy(control)
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset, metric_key_prefix="train")
            return control_copy
def main():
    """Fine-tune the checkpoint on the CodeComplex complexity-classification task."""
    args = get_args()
    set_seed(args.seed)

    dataset = load_dataset("codeparrot/codecomplex", split="train")
    train_test = dataset.train_test_split(test_size=0.2)
    test_validation = train_test["test"].train_test_split(test_size=0.5)
    train_test_validation = DatasetDict(
        {
            "train": train_test["train"],
            "test": test_validation["train"],
            "valid": test_validation["test"],
        }
    )

    print("Loading tokenizer and model")
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7)
    model.config.pad_token_id = model.config.eos_token_id

    if args.freeze:
        for param in model.roberta.parameters():
            param.requires_grad = False

    labels = ClassLabel(num_classes=7, names=list(set(train_test_validation["train"]["complexity"])))

    def tokenize(example):
        inputs = tokenizer(example["src"], truncation=True, max_length=1024)
        label = labels.str2int(example["complexity"])
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }

    tokenized_datasets = train_test_validation.map(
        tokenize,
        batched=True,
        remove_columns=train_test_validation["train"].column_names,
    )
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer)

    training_args = TrainingArguments(
        output_dir=args.output_dir,
        learning_rate=args.learning_rate,
        lr_scheduler_type=args.lr_scheduler_type,
        evaluation_strategy="epoch",
        save_strategy="epoch",
        logging_strategy="epoch",
        per_device_train_batch_size=args.batch_size,
        per_device_eval_batch_size=args.batch_size,
        num_train_epochs=args.num_epochs,
        gradient_accumulation_steps=args.gradient_accumulation_steps,
        weight_decay=0.01,
        metric_for_best_model="accuracy",
        run_name="complexity-java",
        report_to="wandb",
    )

    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"],
        eval_dataset=tokenized_datasets["valid"],
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    print("Training...")
    trainer.add_callback(CustomCallback(trainer))
    trainer.train()


if __name__ == "__main__":
    main()
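# A sketch of the ClassLabel.str2int mapping the tokenize() closure relies on.
# The label names below are hypothetical; the real script derives them from
# the training split's "complexity" column.
from datasets import ClassLabel

demo_labels = ClassLabel(num_classes=3, names=["constant", "linear", "quadratic"])
assert demo_labels.str2int("linear") == 1
assert demo_labels.int2str(2) == "quadratic"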
| 367 |
"""Haversine distance between two points on Earth, using WGS-84 reduced latitudes."""
from math import asin, atan, cos, radians, sin, sqrt, tan

AXIS_A = 6_378_137.0
AXIS_B = 6_356_752.314_245
RADIUS = 6_378_137


def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """Distance in metres along the Earth's surface between two (lat, lon) points."""
    # Equatorial and polar radii give the WGS-84 flattening
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
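# Usage sketch: great-circle distance in metres between San Francisco and
# New York City -- roughly 4.13e6 m (about 4,130 km); the bound is approximate.
SAN_FRANCISCO = (37.774856, -122.424227)
NEW_YORK = (40.713019, -74.012647)
assert 4.0e6 < haversine_distance(*SAN_FRANCISCO, *NEW_YORK) < 4.3e6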
| 72 | 0 |
'''simple docstring'''
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def UpperCAmelCase_ (__a : int ):
"""simple docstring"""
random.seed(__a )
np.random.seed(__a )
torch.manual_seed(__a )
torch.cuda.manual_seed_all(__a )
# ^^ safe to call this function even if cuda is not available
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self : Optional[Any] ,_a : Iterable[torch.nn.Parameter] ,_a : float = 0.9999 ,_a : float = 0.0 ,_a : int = 0 ,_a : bool = False ,_a : Union[float, int] = 1.0 ,_a : Union[float, int] = 2 / 3 ,_a : Optional[Any] = None ,_a : Dict[str, Any] = None ,**_a : Tuple ,):
'''simple docstring'''
if isinstance(_a ,torch.nn.Module ):
_a : str = (
'Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. '
'Please pass the parameters of the module instead.'
)
deprecate(
'passing a `torch.nn.Module` to `ExponentialMovingAverage`' ,'1.0.0' ,_a ,standard_warn=_a ,)
_a : Optional[int] = parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
_a : List[str] = True
if kwargs.get('max_value' ,_a ) is not None:
_a : Optional[int] = 'The `max_value` argument is deprecated. Please use `decay` instead.'
deprecate('max_value' ,'1.0.0' ,_a ,standard_warn=_a )
_a : Union[str, Any] = kwargs['max_value']
if kwargs.get('min_value' ,_a ) is not None:
_a : List[str] = 'The `min_value` argument is deprecated. Please use `min_decay` instead.'
deprecate('min_value' ,'1.0.0' ,_a ,standard_warn=_a )
_a : Optional[int] = kwargs['min_value']
        parameters = list(parameters)
        self.shadow_params = [p.clone().detach() for p in parameters]

        if kwargs.get("device", None) is not None:
            deprecation_message = "The `device` argument is deprecated. Please use `to` instead."
            deprecate("device", "1.0.0", deprecation_message, standard_warn=False)
            self.to(device=kwargs["device"])

        self.temp_stored_params = None

        self.decay = decay
        self.min_decay = min_decay
        self.update_after_step = update_after_step
        self.use_ema_warmup = use_ema_warmup
        self.inv_gamma = inv_gamma
        self.power = power
        self.optimization_step = 0
        self.cur_decay_value = None  # set in `step()`

        self.model_cls = model_cls
        self.model_config = model_config
@classmethod
def __lowercase ( cls : Union[str, Any] ,_a : List[Any] ,_a : List[Any] ):
'''simple docstring'''
_a, _a : Union[str, Any] = model_cls.load_config(_a ,return_unused_kwargs=_a )
_a : Dict = model_cls.from_pretrained(_a )
_a : List[str] = cls(model.parameters() ,model_cls=_a ,model_config=model.config )
ema_model.load_state_dict(_a )
return ema_model
def __lowercase ( self : Dict ,_a : Any ):
'''simple docstring'''
if self.model_cls is None:
raise ValueError('`save_pretrained` can only be used if `model_cls` was defined at __init__.' )
if self.model_config is None:
raise ValueError('`save_pretrained` can only be used if `model_config` was defined at __init__.' )
_a : str = self.model_cls.from_config(self.model_config )
_a : List[str] = self.state_dict()
state_dict.pop('shadow_params' ,_a )
model.register_to_config(**_a )
self.copy_to(model.parameters() )
model.save_pretrained(_a )
def __lowercase ( self : Dict ,_a : int ):
'''simple docstring'''
_a : Tuple = max(0 ,optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
if self.use_ema_warmup:
_a : Optional[int] = 1 - (1 + step / self.inv_gamma) ** -self.power
else:
_a : List[Any] = (1 + step) / (10 + step)
_a : Tuple = min(_a ,self.decay )
# make sure decay is not smaller than min_decay
_a : str = max(_a ,self.min_decay )
return cur_decay_value
@torch.no_grad()
def __lowercase ( self : Any ,_a : Iterable[torch.nn.Parameter] ):
'''simple docstring'''
if isinstance(_a ,torch.nn.Module ):
_a : str = (
'Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. '
'Please pass the parameters of the module instead.'
)
deprecate(
'passing a `torch.nn.Module` to `ExponentialMovingAverage.step`' ,'1.0.0' ,_a ,standard_warn=_a ,)
_a : Tuple = parameters.parameters()
_a : Optional[Any] = list(_a )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
_a : int = self.get_decay(self.optimization_step )
_a : Dict = decay
_a : int = 1 - decay
_a : Union[str, Any] = contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params ,_a ):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
_a : Union[str, Any] = deepspeed.zero.GatheredParameters(_a ,modifier_rank=_a )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(_a )
def __lowercase ( self : Optional[int] ,_a : Iterable[torch.nn.Parameter] ):
'''simple docstring'''
_a : str = list(_a )
for s_param, param in zip(self.shadow_params ,_a ):
param.data.copy_(s_param.to(param.device ).data )
def __lowercase ( self : int ,_a : Optional[Any]=None ,_a : int=None ):
'''simple docstring'''
_a : str = [
p.to(device=_a ,dtype=_a ) if p.is_floating_point() else p.to(device=_a )
for p in self.shadow_params
]
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def __lowercase ( self : int ,_a : Iterable[torch.nn.Parameter] ):
'''simple docstring'''
_a : List[Any] = [param.detach().cpu().clone() for param in parameters]
def __lowercase ( self : Optional[int] ,_a : Iterable[torch.nn.Parameter] ):
'''simple docstring'''
if self.temp_stored_params is None:
raise RuntimeError('This ExponentialMovingAverage has no `store()`ed weights ' 'to `restore()`' )
for c_param, param in zip(self.temp_stored_params ,_a ):
param.data.copy_(c_param.data )
# Better memory-wise.
_a : Union[str, Any] = None
def __lowercase ( self : List[Any] ,_a : dict ):
'''simple docstring'''
_a : List[Any] = copy.deepcopy(_a )
_a : Optional[int] = state_dict.get('decay' ,self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError('Decay must be between 0 and 1' )
_a : List[Any] = state_dict.get('min_decay' ,self.min_decay )
if not isinstance(self.min_decay ,_a ):
raise ValueError('Invalid min_decay' )
_a : str = state_dict.get('optimization_step' ,self.optimization_step )
if not isinstance(self.optimization_step ,_a ):
raise ValueError('Invalid optimization_step' )
_a : Optional[int] = state_dict.get('update_after_step' ,self.update_after_step )
if not isinstance(self.update_after_step ,_a ):
raise ValueError('Invalid update_after_step' )
_a : str = state_dict.get('use_ema_warmup' ,self.use_ema_warmup )
if not isinstance(self.use_ema_warmup ,_a ):
raise ValueError('Invalid use_ema_warmup' )
_a : int = state_dict.get('inv_gamma' ,self.inv_gamma )
if not isinstance(self.inv_gamma ,(float, int) ):
raise ValueError('Invalid inv_gamma' )
_a : Optional[Any] = state_dict.get('power' ,self.power )
if not isinstance(self.power ,(float, int) ):
raise ValueError('Invalid power' )
_a : Union[str, Any] = state_dict.get('shadow_params' ,_a )
if shadow_params is not None:
_a : Union[str, Any] = shadow_params
if not isinstance(self.shadow_params ,_a ):
raise ValueError('shadow_params must be a list' )
if not all(isinstance(_a ,torch.Tensor ) for p in self.shadow_params ):
raise ValueError('shadow_params must all be Tensors' )
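# The decay schedule implemented by get_decay() above, rewritten as a free
# function for illustration; same formula and defaults, not the library API.
def ema_decay(step: int, update_after_step: int = 0, use_ema_warmup: bool = False,
              inv_gamma: float = 1.0, power: float = 2 / 3,
              decay: float = 0.9999, min_decay: float = 0.0) -> float:
    step = max(0, step - update_after_step - 1)
    if step <= 0:
        return 0.0
    if use_ema_warmup:
        cur = 1 - (1 + step / inv_gamma) ** -power  # warmup: slowly ramps toward 1
    else:
        cur = (1 + step) / (10 + step)  # default: quick ramp, e.g. 2/11 at step 2
    return max(min(cur, decay), min_decay)  # clamp to [min_decay, decay]


assert ema_decay(1) == 0.0
assert ema_decay(2) == 2 / 11
assert ema_decay(10**6) == 0.9999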
| 271 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self : int ,_a : Any ,_a : Optional[int]=2 ,_a : Optional[Any]=True ,_a : Dict=False ,_a : Dict=10 ,_a : Any=3 ,_a : str=32 * 8 ,_a : Optional[int]=32 * 8 ,_a : int=4 ,_a : str=64 ,):
'''simple docstring'''
_a : Dict = parent
_a : Union[str, Any] = batch_size
_a : Tuple = is_training
_a : List[str] = use_auxiliary_loss
_a : Optional[Any] = num_queries
_a : str = num_channels
_a : List[str] = min_size
_a : int = max_size
_a : Optional[int] = num_labels
_a : List[str] = hidden_dim
_a : int = hidden_dim
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : Tuple = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
_a )
_a : Optional[Any] = torch.ones([self.batch_size, self.min_size, self.max_size] ,device=_a )
_a : Union[str, Any] = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] ,device=_a ) > 0.5
).float()
_a : Tuple = (torch.rand((self.batch_size, self.num_labels) ,device=_a ) > 0.5).long()
_a : Dict = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : int = MaskaFormerConfig(
hidden_size=self.hidden_dim ,)
_a : str = self.num_queries
_a : Union[str, Any] = self.num_labels
_a : Tuple = [1, 1, 1, 1]
_a : Dict = self.num_channels
_a : str = 64
_a : Tuple = 128
_a : Optional[Any] = self.hidden_dim
_a : Union[str, Any] = self.hidden_dim
_a : List[Any] = self.hidden_dim
return config
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a, _a, _a, _a, _a : Optional[Any] = self.prepare_config_and_inputs()
_a : str = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
return config, inputs_dict
def __lowercase ( self : List[str] ,_a : Optional[Any] ,_a : str ):
'''simple docstring'''
_a : str = output.encoder_hidden_states
_a : Any = output.pixel_decoder_hidden_states
_a : Optional[Any] = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(_a ) ,len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_a ) ,len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_a ) ,config.decoder_layers )
def __lowercase ( self : List[str] ,_a : str ,_a : List[Any] ,_a : Any ,_a : Union[str, Any]=False ):
'''simple docstring'''
with torch.no_grad():
_a : str = MaskaFormerModel(config=_a )
model.to(_a )
model.eval()
_a : Any = model(pixel_values=_a ,pixel_mask=_a )
_a : Optional[Any] = model(_a ,output_hidden_states=_a )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape ,(self.batch_size, self.num_queries, self.hidden_dim) ,)
# let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(_a ,_a )
def __lowercase ( self : Tuple ,_a : List[Any] ,_a : Union[str, Any] ,_a : Tuple ,_a : List[str] ,_a : Any ):
'''simple docstring'''
_a : int = MaskaFormerForUniversalSegmentation(config=_a )
model.to(_a )
model.eval()
def comm_check_on_output(_a : Any ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape ,(self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) ,)
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape ,(self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
_a : Any = model(pixel_values=_a ,pixel_mask=_a )
_a : Optional[int] = model(_a )
comm_check_on_output(_a )
_a : List[str] = model(
pixel_values=_a ,pixel_mask=_a ,mask_labels=_a ,class_labels=_a )
comm_check_on_output(_a )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape ,torch.Size([1] ) )
@require_torch
class UpperCAmelCase__ ( lowercase__ , lowercase__ , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
__UpperCAmelCase : Dict = {'''feature-extraction''': MaskaFormerModel} if is_torch_available() else {}
__UpperCAmelCase : Dict = False
__UpperCAmelCase : Tuple = False
__UpperCAmelCase : Dict = False
__UpperCAmelCase : List[Any] = False
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a : Union[str, Any] = MaskaFormerModelTester(self )
_a : Dict = ConfigTester(self ,config_class=_a ,has_text_modality=_a )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a, _a : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_a ,**_a ,output_hidden_states=_a )
def __lowercase ( self : str ):
'''simple docstring'''
_a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*_a )
@unittest.skip(reason='Mask2Former does not use inputs_embeds' )
def __lowercase ( self : Any ):
'''simple docstring'''
pass
@unittest.skip(reason='Mask2Former does not have a get_input_embeddings method' )
def __lowercase ( self : str ):
'''simple docstring'''
pass
@unittest.skip(reason='Mask2Former is not a generative model' )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason='Mask2Former does not use token embeddings' )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def __lowercase ( self : Dict ):
'''simple docstring'''
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
pass
def __lowercase ( self : int ):
'''simple docstring'''
_a, _a : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : Union[str, Any] = model_class(_a )
_a : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a : Optional[Any] = [*signature.parameters.keys()]
_a : List[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] ,_a )
@slow
def __lowercase ( self : List[str] ):
'''simple docstring'''
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
_a : Dict = MaskaFormerModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
_a : int = (self.model_tester.min_size,) * 2
_a : Any = {
'pixel_values': torch.randn((2, 3, *size) ,device=_a ),
'mask_labels': torch.randn((2, 10, *size) ,device=_a ),
'class_labels': torch.zeros(2 ,10 ,device=_a ).long(),
}
_a : List[Any] = self.model_tester.get_config()
_a : int = MaskaFormerForUniversalSegmentation(_a ).to(_a )
_a : str = model(**_a )
self.assertTrue(outputs.loss is not None )
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a, _a : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_a ,**_a ,output_hidden_states=_a )
def __lowercase ( self : int ):
'''simple docstring'''
_a, _a : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : Any = model_class(_a ).to(_a )
_a : Optional[int] = model(**_a ,output_attentions=_a )
self.assertTrue(outputs.attentions is not None )
def __lowercase ( self : Tuple ):
'''simple docstring'''
if not self.model_tester.is_training:
return
_a : List[str] = self.all_model_classes[1]
_a, _a, _a, _a, _a : List[str] = self.model_tester.prepare_config_and_inputs()
_a : Any = model_class(_a )
model.to(_a )
model.train()
_a : Union[str, Any] = model(_a ,mask_labels=_a ,class_labels=_a ).loss
loss.backward()
def __lowercase ( self : int ):
'''simple docstring'''
_a : int = self.all_model_classes[1]
_a, _a, _a, _a, _a : List[Any] = self.model_tester.prepare_config_and_inputs()
_a : str = True
_a : str = True
_a : List[str] = model_class(_a ).to(_a )
model.train()
_a : Optional[int] = model(_a ,mask_labels=_a ,class_labels=_a )
_a : Tuple = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
_a : str = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
_a : Dict = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
_a : List[str] = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=_a )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
__lowerCAmelCase = 1e-4
def UpperCAmelCase_ ():
"""simple docstring"""
_a : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_vision
@slow
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def __lowercase ( self : Any ):
'''simple docstring'''
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def __lowercase ( self : Any ):
'''simple docstring'''
_a : List[str] = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(_a )
_a : int = self.default_image_processor
_a : Tuple = prepare_img()
_a : Any = image_processor(_a ,return_tensors='pt' ).to(_a )
_a : Union[str, Any] = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_a ,(1, 3, 384, 384) )
with torch.no_grad():
_a : Optional[Any] = model(**_a )
_a : List[Any] = torch.tensor(
[[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]] ).to(_a )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] ,_a ,atol=_a ) )
_a : str = torch.tensor(
[[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]] ).to(_a )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] ,_a ,atol=_a ) )
_a : Any = torch.tensor(
[[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]] ).to(_a )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] ,_a ,atol=_a ) )
def __lowercase ( self : Tuple ):
'''simple docstring'''
_a : List[Any] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_a ).eval()
_a : Optional[Any] = self.default_image_processor
_a : List[Any] = prepare_img()
_a : str = image_processor(_a ,return_tensors='pt' ).to(_a )
_a : Any = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_a ,(1, 3, 384, 384) )
with torch.no_grad():
_a : Optional[int] = model(**_a )
# masks_queries_logits
_a : Dict = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape ,(1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
_a : Dict = [
[-8.7839, -9.0056, -8.8121],
[-7.4104, -7.0313, -6.5401],
[-6.6105, -6.3427, -6.4675],
]
_a : Optional[Any] = torch.tensor(_a ).to(_a )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] ,_a ,atol=_a ) )
# class_queries_logits
_a : str = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape ,(1, model.config.num_queries, model.config.num_labels + 1) )
_a : str = torch.tensor(
[
[1.8324, -8.0835, -4.1922],
[0.8450, -9.0050, -3.6053],
[0.3045, -7.7293, -3.0275],
] ).to(_a )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] ,_a ,atol=_a ) )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : Any = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_a ).eval()
_a : Tuple = self.default_image_processor
_a : Tuple = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] ,segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] ,return_tensors='pt' ,)
_a : str = inputs['pixel_values'].to(_a )
_a : str = [el.to(_a ) for el in inputs['mask_labels']]
_a : Dict = [el.to(_a ) for el in inputs['class_labels']]
with torch.no_grad():
_a : List[str] = model(**_a )
self.assertTrue(outputs.loss is not None )
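# A minimal sketch of the retain_grad() pattern used in the training test
# above: non-leaf tensors drop their .grad unless explicitly retained before
# backward(). Standalone illustration, not the model under test.
import torch

x = torch.randn(4, 8, requires_grad=True)
hidden = torch.relu(x @ torch.randn(8, 8))  # intermediate (non-leaf) activation
hidden.retain_grad()
loss = hidden.sum()
loss.backward(retain_graph=True)
assert hidden.grad is not None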
| 271 | 1 |
"""simple docstring"""
from __future__ import annotations
import math
def SCREAMING_SNAKE_CASE_ ( snake_case : int )-> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(snake_case ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def SCREAMING_SNAKE_CASE_ ( snake_case : int )-> list[int]:
_lowerCamelCase = str(snake_case )
_lowerCamelCase = [n]
for i in range(1 , len(snake_case ) ):
list_nums.append(int(str_num[i:] ) )
list_nums.append(int(str_num[:-i] ) )
return list_nums
def SCREAMING_SNAKE_CASE_ ( snake_case : int )-> bool:
if len(str(snake_case ) ) > 3:
if not is_prime(int(str(snake_case )[-3:] ) ) or not is_prime(int(str(snake_case )[:3] ) ):
return False
return True
def SCREAMING_SNAKE_CASE_ ( snake_case : int = 11 )-> list[int]:
_lowerCamelCase = []
_lowerCamelCase = 13
while len(snake_case ) != count:
if validate(snake_case ):
_lowerCamelCase = list_truncated_nums(snake_case )
if all(is_prime(snake_case ) for i in list_nums ):
list_truncated_primes.append(snake_case )
num += 2
return list_truncated_primes
def SCREAMING_SNAKE_CASE_ ( )-> int:
return sum(compute_truncated_primes(11 ) )
if __name__ == "__main__":
print(f'{sum(compute_truncated_primes(1_1)) = }')
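# Usage sketch: the eleven two-sided truncatable primes and their sum
# (the known answer to Project Euler problem 37).
assert compute_truncated_primes(11) == [23, 37, 53, 73, 313, 317, 373, 797, 3137, 3797, 739397]
assert solution() == 748317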
| 80 |
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class __a ( unittest.TestCase ):
def snake_case_ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def snake_case_ ( self ):
_lowerCamelCase , _lowerCamelCase = FlaxControlNetModel.from_pretrained(
'lllyasviel/sd-controlnet-canny' , from_pt=a__ , dtype=jnp.bfloataa )
_lowerCamelCase , _lowerCamelCase = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , controlnet=a__ , from_pt=a__ , dtype=jnp.bfloataa )
_lowerCamelCase = controlnet_params
_lowerCamelCase = 'bird'
_lowerCamelCase = jax.device_count()
_lowerCamelCase = pipe.prepare_text_inputs([prompts] * num_samples )
_lowerCamelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' )
_lowerCamelCase = pipe.prepare_image_inputs([canny_image] * num_samples )
_lowerCamelCase = jax.random.PRNGKey(0 )
_lowerCamelCase = jax.random.split(a__ , jax.device_count() )
_lowerCamelCase = replicate(a__ )
_lowerCamelCase = shard(a__ )
_lowerCamelCase = shard(a__ )
_lowerCamelCase = pipe(
prompt_ids=a__ , image=a__ , params=a__ , prng_seed=a__ , num_inference_steps=50 , jit=a__ , ).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
_lowerCamelCase = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
_lowerCamelCase = images[0, 2_53:2_56, 2_53:2_56, -1]
_lowerCamelCase = jnp.asarray(jax.device_get(image_slice.flatten() ) )
_lowerCamelCase = jnp.array(
[0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078] )
print(F'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def snake_case_ ( self ):
_lowerCamelCase , _lowerCamelCase = FlaxControlNetModel.from_pretrained(
'lllyasviel/sd-controlnet-openpose' , from_pt=a__ , dtype=jnp.bfloataa )
_lowerCamelCase , _lowerCamelCase = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , controlnet=a__ , from_pt=a__ , dtype=jnp.bfloataa )
_lowerCamelCase = controlnet_params
_lowerCamelCase = 'Chef in the kitchen'
_lowerCamelCase = jax.device_count()
_lowerCamelCase = pipe.prepare_text_inputs([prompts] * num_samples )
_lowerCamelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png' )
_lowerCamelCase = pipe.prepare_image_inputs([pose_image] * num_samples )
_lowerCamelCase = jax.random.PRNGKey(0 )
_lowerCamelCase = jax.random.split(a__ , jax.device_count() )
_lowerCamelCase = replicate(a__ )
_lowerCamelCase = shard(a__ )
_lowerCamelCase = shard(a__ )
_lowerCamelCase = pipe(
prompt_ids=a__ , image=a__ , params=a__ , prng_seed=a__ , num_inference_steps=50 , jit=a__ , ).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
_lowerCamelCase = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
_lowerCamelCase = images[0, 2_53:2_56, 2_53:2_56, -1]
_lowerCamelCase = jnp.asarray(jax.device_get(image_slice.flatten() ) )
_lowerCamelCase = jnp.array(
[[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]] )
print(F'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
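# A minimal sketch of the replicate/shard data-parallel pattern these tests
# use, with a trivial pmapped function standing in for the diffusion pipeline.
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard

params = {"w": jnp.full((3,), 2.0)}
replicated_params = replicate(params)  # copy the param pytree onto every device
batch = jnp.ones((jax.device_count() * 2, 3))
sharded_batch = shard(batch)  # leading axis becomes (devices, per_device, ...)


@jax.pmap
def apply(p, x):
    return x * p["w"]


out = apply(replicated_params, sharded_batch)
assert out.shape == (jax.device_count(), 2, 3)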
| 80 | 1 |
"""simple docstring"""
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def _lowercase ( __snake_case ) -> List[str]:
if isinstance(__snake_case ,collections.abc.Iterable ):
return x
return (x, x)
@require_flax
class A__ :
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: Optional[Any]) -> str:
"""simple docstring"""
pass
def _SCREAMING_SNAKE_CASE ( self: str) -> int:
"""simple docstring"""
pass
def _SCREAMING_SNAKE_CASE ( self: List[str]) -> Tuple:
"""simple docstring"""
pass
def _SCREAMING_SNAKE_CASE ( self: Any , _SCREAMING_SNAKE_CASE: np.ndarray , _SCREAMING_SNAKE_CASE: np.ndarray , _SCREAMING_SNAKE_CASE: float) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : str = np.abs((a - b)).max()
self.assertLessEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , F"""Difference between torch and flax is {diff} (>= {tol}).""")
def _SCREAMING_SNAKE_CASE ( self: str , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: Any , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: Any=None , **_SCREAMING_SNAKE_CASE: Tuple) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : Tuple = VisionTextDualEncoderConfig.from_vision_text_configs(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[str] = FlaxVisionTextDualEncoderModel(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[int] = model(input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE)
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], config.projection_dim))
self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], config.projection_dim))
def _SCREAMING_SNAKE_CASE ( self: List[Any] , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: Optional[Any]=None , **_SCREAMING_SNAKE_CASE: List[str]) -> Dict:
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase : List[Any] = self.get_vision_text_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = {"vision_model": vision_model, "text_model": text_model}
__lowerCAmelCase : Optional[Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = model(input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE)
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim))
self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim))
def _SCREAMING_SNAKE_CASE ( self: Dict , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: List[Any]=None , **_SCREAMING_SNAKE_CASE: Tuple) -> str:
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase : Optional[Any] = self.get_vision_text_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[Any] = {"vision_model": vision_model, "text_model": text_model}
__lowerCAmelCase : Any = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[Any] = model(input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Any = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = FlaxVisionTextDualEncoderModel.from_pretrained(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : int = model(input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = after_output[0]
__lowerCAmelCase : Any = np.amax(np.abs(out_a - out_a))
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1e-3)
def _SCREAMING_SNAKE_CASE ( self: Tuple , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Any=None , **_SCREAMING_SNAKE_CASE: List[str]) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase : Any = self.get_vision_text_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = {"vision_model": vision_model, "text_model": text_model}
__lowerCAmelCase : Optional[Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[Any] = model(
input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , output_attentions=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[Any] = output.vision_model_output.attentions
self.assertEqual(len(_SCREAMING_SNAKE_CASE) , vision_config.num_hidden_layers)
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
__lowerCAmelCase : List[str] = to_atuple(vision_model.config.image_size)
__lowerCAmelCase : Any = to_atuple(vision_model.config.patch_size)
__lowerCAmelCase : Union[str, Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
__lowerCAmelCase : Tuple = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len))
__lowerCAmelCase : Union[str, Any] = output.text_model_output.attentions
self.assertEqual(len(_SCREAMING_SNAKE_CASE) , text_config.num_hidden_layers)
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def _SCREAMING_SNAKE_CASE ( self: List[str] , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: int) -> str:
"""simple docstring"""
pt_model.to(_SCREAMING_SNAKE_CASE)
pt_model.eval()
# prepare inputs
__lowerCAmelCase : Union[str, Any] = inputs_dict
__lowerCAmelCase : Union[str, Any] = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()}
with torch.no_grad():
__lowerCAmelCase : Any = pt_model(**_SCREAMING_SNAKE_CASE).to_tuple()
__lowerCAmelCase : List[Any] = fx_model(**_SCREAMING_SNAKE_CASE).to_tuple()
self.assertEqual(len(_SCREAMING_SNAKE_CASE) , len(_SCREAMING_SNAKE_CASE) , "Output lengths differ between Flax and PyTorch")
for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4]):
self.assert_almost_equals(_SCREAMING_SNAKE_CASE , pt_output.numpy() , 4e-2)
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : str = FlaxVisionTextDualEncoderModel.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : int = fx_model_loaded(**_SCREAMING_SNAKE_CASE).to_tuple()
self.assertEqual(len(_SCREAMING_SNAKE_CASE) , len(_SCREAMING_SNAKE_CASE) , "Output lengths differ between Flax and PyTorch")
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4]):
self.assert_almost_equals(_SCREAMING_SNAKE_CASE , pt_output.numpy() , 4e-2)
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = VisionTextDualEncoderModel.from_pretrained(_SCREAMING_SNAKE_CASE , from_flax=_SCREAMING_SNAKE_CASE)
pt_model_loaded.to(_SCREAMING_SNAKE_CASE)
pt_model_loaded.eval()
with torch.no_grad():
__lowerCAmelCase : Optional[Any] = pt_model_loaded(**_SCREAMING_SNAKE_CASE).to_tuple()
self.assertEqual(len(_SCREAMING_SNAKE_CASE) , len(_SCREAMING_SNAKE_CASE) , "Output lengths differ between Flax and PyTorch")
for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4]):
self.assert_almost_equals(_SCREAMING_SNAKE_CASE , pt_output_loaded.numpy() , 4e-2)
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Dict) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = VisionTextDualEncoderConfig.from_vision_text_configs(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Dict = VisionTextDualEncoderModel(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : str = FlaxVisionTextDualEncoderModel(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : str = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[Any] = fx_state
self.check_pt_flax_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: List[Any] , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: Dict) -> str:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = VisionTextDualEncoderConfig.from_vision_text_configs(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Dict = VisionTextDualEncoderModel(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Tuple = FlaxVisionTextDualEncoderModel(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : int = load_flax_weights_in_pytorch_model(_SCREAMING_SNAKE_CASE , fx_model.params)
self.check_pt_flax_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: List[Any]) -> str:
"""simple docstring"""
__lowerCAmelCase : List[str] = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: Dict) -> int:
"""simple docstring"""
__lowerCAmelCase : List[Any] = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: List[Any]) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : Dict = self.prepare_config_and_inputs()
self.check_save_load(**_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: int) -> Dict:
"""simple docstring"""
__lowerCAmelCase : List[str] = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**_SCREAMING_SNAKE_CASE)
@is_pt_flax_cross_test
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> Any:
"""simple docstring"""
__lowerCAmelCase : Dict = self.prepare_config_and_inputs()
__lowerCAmelCase : List[Any] = config_inputs_dict.pop("vision_config")
__lowerCAmelCase : str = config_inputs_dict.pop("text_config")
__lowerCAmelCase : Union[str, Any] = config_inputs_dict
self.check_equivalence_pt_to_flax(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
self.check_equivalence_flax_to_pt(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
@slow
def _SCREAMING_SNAKE_CASE ( self: str) -> Dict:
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase : Dict = self.get_pretrained_model_and_inputs()
__lowerCAmelCase : Union[str, Any] = model_a(**_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[str] = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[Any] = FlaxVisionTextDualEncoderModel.from_pretrained(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = model_a(**_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[str] = after_outputs[0]
__lowerCAmelCase : List[Any] = np.amax(np.abs(out_a - out_a))
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1e-5)
@require_flax
class A__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self: List[str]) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : Any = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"hf-internal-testing/tiny-random-vit" , "hf-internal-testing/tiny-bert" , vision_from_pt=_SCREAMING_SNAKE_CASE , text_from_pt=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Union[str, Any] = 13
__lowerCAmelCase : Optional[int] = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
])
__lowerCAmelCase : List[Any] = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size)
__lowerCAmelCase : List[Any] = random_attention_mask([batch_size, 4])
__lowerCAmelCase : str = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def _SCREAMING_SNAKE_CASE ( self: int , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: Union[str, Any]) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase : List[str] = FlaxViTModel(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = FlaxBertModel(_SCREAMING_SNAKE_CASE)
return vision_model, text_model
def _SCREAMING_SNAKE_CASE ( self: Optional[int]) -> int:
"""simple docstring"""
__lowerCAmelCase : List[Any] = FlaxViTModelTester(self)
__lowerCAmelCase : Optional[Any] = FlaxBertModelTester(self)
__lowerCAmelCase : int = vit_model_tester.prepare_config_and_inputs()
__lowerCAmelCase : List[str] = bert_model_tester.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase : Tuple = vision_config_and_inputs
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class A__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self: Dict) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"hf-internal-testing/tiny-random-clip" , "hf-internal-testing/tiny-bert" , vision_from_pt=_SCREAMING_SNAKE_CASE , text_from_pt=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Optional[int] = 13
__lowerCAmelCase : List[str] = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
])
__lowerCAmelCase : Any = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size)
__lowerCAmelCase : str = random_attention_mask([batch_size, 4])
__lowerCAmelCase : Optional[int] = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def _SCREAMING_SNAKE_CASE ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: Union[str, Any]) -> int:
"""simple docstring"""
__lowerCAmelCase : int = FlaxCLIPVisionModel(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[str] = FlaxBertModel(_SCREAMING_SNAKE_CASE)
return vision_model, text_model
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : List[Any] = FlaxCLIPVisionModelTester(self)
__lowerCAmelCase : str = FlaxBertModelTester(self)
__lowerCAmelCase : Optional[Any] = clip_model_tester.prepare_config_and_inputs()
__lowerCAmelCase : Dict = bert_model_tester.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase : Any = vision_config_and_inputs
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : List[Any] = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class A__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _SCREAMING_SNAKE_CASE ( self: List[str]) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : Dict = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian" , logit_scale_init_value=1.0)
__lowerCAmelCase : str = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")
__lowerCAmelCase : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
__lowerCAmelCase : Optional[int] = processor(
text=["una foto di un gatto", "una foto di un cane"] , images=_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , return_tensors="np")
__lowerCAmelCase : List[str] = model(**_SCREAMING_SNAKE_CASE)
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
__lowerCAmelCase : List[str] = np.array([[1.228_4727, 0.310_4122]])
self.assertTrue(np.allclose(outputs.logits_per_image , _SCREAMING_SNAKE_CASE , atol=1e-3)) | 269 |
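# Worked example of the sequence-length check in the tests above: for a ViT with
# image_size=(224, 224) and patch_size=(16, 16), num_patches = (224 // 16) * (224 // 16) = 196,
# and the extra [CLS] token gives seq_len = 197, so each attention map has shape
# (num_attention_heads, 197, 197). (Illustrative numbers only; the tests use the tiny model configs.)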
"""simple docstring"""
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score as fa_score  # sklearn exposes f1_score; aliased to match the local naming
import datasets
_CITATION = '\\n    @inproceedings{kakwani2020indicnlpsuite,\n    title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n    author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n    year={2020},\n    booktitle={Findings of EMNLP},\n}\n'
_DESCRIPTION = '\\n    IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n    variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n'
_KWARGS_DESCRIPTION = '\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n    predictions: list of predictions to score (as int64),\n        except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32).\n    references: list of ground truth labels corresponding to the predictions (as int64),\n        except for \'cvit-mkb-clsr\' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n    "accuracy": Accuracy\n    "f1": F1 score\n    "precision": Precision@10\nExamples:\n\n    >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\')  # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]\n    >>> references = [0, 1]\n    >>> predictions = [0, 1]\n    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'accuracy\': 1.0}\n\n    >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\')\n    >>> references = [0, 1]\n    >>> predictions = [0, 1]\n    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'accuracy\': 1.0, \'f1\': 1.0}\n\n    >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\')\n    >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n    >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'precision@10\': 1.0}\n\n'
def simple_accuracy(preds, labels) -> float:
    return float((preds == labels).mean())
def acc_and_fa(preds, labels) -> dict:
    acc = simple_accuracy(preds, labels)
    fa = float(fa_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": fa,
    }
def precision_at_aa(en_sentvecs, in_sentvecs) -> float:
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]
    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)
    sim = cdist(en_sentvecs, in_sentvecs, "cosine")
    actual = np.array(range(n))
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())
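# Toy illustration of precision_at_aa above (hypothetical 2-d embeddings, not part of
# the original metric): when every "translation" vector equals its source vector, each
# row's nearest neighbour under cosine distance is its own index, so precision@10 is 1.0.
#
#     en = np.array([[1.0, 0.0], [0.0, 1.0]])
#     assert precision_at_aa(en, en.copy()) == 1.0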
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A__ ( datasets.Metric ):
'''simple docstring'''
    def _info(self):
"""simple docstring"""
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
"You should supply a configuration name selected in "
"[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", "
"\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", "
"\"wiki-ner\"]")
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("int64")
if self.config_name != "cvit-mkb-clsr"
else datasets.Sequence(datasets.Value("float32")),
"references": datasets.Value("int64")
if self.config_name != "cvit-mkb-clsr"
else datasets.Sequence(datasets.Value("float32")),
}) , codebase_urls=[] , reference_urls=[] , format="numpy" if self.config_name != "cvit-mkb-clsr" else None , )
    def _compute(self, predictions, references):
        """simple docstring"""
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_aa(predictions, references)}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_fa(predictions, references)
elif self.config_name in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
]:
return {"accuracy": simple_accuracy(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)}
else:
raise KeyError(
"You should supply a configuration name selected in "
"[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", "
"\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", "
"\"wiki-ner\"]") | 269 | 1 |
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = '''\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",
author = "Moosavi, Nafise Sadat and
Strube, Michael",
booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = aug,
year = "2016",
address = "Berlin, Germany",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/P16-1060",
doi = "10.18653/v1/P16-1060",
pages = "632--642",
}
'''
_DESCRIPTION = '''\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements of the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only works with the CoNLL line format:
The CoNLL format has one word per line, with all the annotations for that word in columns separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4   Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contains the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6   Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterisk with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11  Named Entities This column identifies the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More information on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
'''
_KWARGS_DESCRIPTION = '''
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
 Only columns 4, 5, 6 and the last column are used (word, POS, parse bit and coreference annotation)
See the details on the format in the description of the metric.
 references: list of sentences. Each sentence is a list of word references to score in the CoNLL format.
  Each reference is a word with its annotations as a string made of columns joined with spaces.
  Only columns 4, 5, 6 and the last column are used (word, POS, parse bit and coreference annotation)
See the details on the format in the description of the metric.
 keep_singletons: After extracting all mentions of key or system files,
  mentions whose corresponding coreference chain is of size one
  are considered singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting \'keep_singletons=False\', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
\'mentions\': mentions
\'muc\': MUC metric [Vilain et al, 1995]
\'bcub\': B-cubed [Bagga and Baldwin, 1998]
\'ceafe\': CEAFe [Luo et al., 2005]
\'lea\': LEA [Moosavi and Strube, 2016]
\'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric(\'coval\')
>>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',
... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',
... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',
... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',
... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',
... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}
'''
def get_coref_infos(key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}
    doc_coref_infos = {}
    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0
    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num
    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)
    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num
    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)
    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)
    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
    if remove_nested:
        logger.info(
            '''Number of removed nested coreferring mentions in the key '''
            f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}" )
        logger.info(
            '''Number of resulting singleton clusters in the key '''
            f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}" )
    if not keep_singletons:
        logger.info(
            f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
            '''files, respectively''' )
    return doc_coref_infos
def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)
    output_scores = {}
    conll = 0
    conll_subparts_num = 0
    for name, metric in metrics:
        recall, precision, fa = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += fa
            conll_subparts_num += 1
        output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": fa})
        logger.info(
            name.ljust(10), f"Recall: {recall * 100:.2f}", f" Precision: {precision * 100:.2f}", f" F1: {fa * 100:.2f}", )
    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f"CoNLL score: {conll:.2f}")
        output_scores.update({'''conll_score''': conll})
    return output_scores
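# Worked example of the CoNLL aggregation above (toy numbers, not real scores): if the
# muc, bcub and ceafe F1 values are 0.90, 0.80 and 0.70, then conll_subparts_num == 3 and
# conll_score = (0.90 + 0.80 + 0.70) / 3 * 100 = 80.0.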
def check_gold_parse_annotation(key_lines):
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith('''#'''):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
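# Illustration of the gold-parse check above on the CoNLL line from the docstring: in
# "bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* ..." the sixth column (line.split()[5])
# is the parse bit "(TOP(S(VP*", so has_gold_parse becomes True; a "-" there leaves it False.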
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A ( datasets.Metric ):
    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' ) ),
'''references''': datasets.Sequence(datasets.Value('''string''' ) ),
} ), codebase_urls=['''https://github.com/ns-moosavi/coval'''], reference_urls=[
'''https://github.com/ns-moosavi/coval''',
'''https://www.aclweb.org/anthology/P16-1060''',
'''http://www.conll.cemantix.org/2012/data.html''',
], )
    def _compute(self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False):
        """simple docstring"""
        metrics = [
            ('''mentions''', evaluator.mentions),
            ('''muc''', evaluator.muc),
            ('''bcub''', evaluator.b_cubed),
            ('''ceafe''', evaluator.ceafe),
            ('''lea''', evaluator.lea),
        ]
        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError('''References should have gold parse annotation to use \'min_span\'.''')
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"
        score = evaluate(
            key_lines=references, sys_lines=predictions, metrics=metrics, NP_only=NP_only, remove_nested=remove_nested, keep_singletons=keep_singletons, min_span=min_span, )
        return score
| 167 |
import pprint
import requests
API_ENDPOINT_URL = '''https://zenquotes.io/api'''
def quote_of_the_day():  # name inferred from the /today endpoint
    return requests.get(API_ENDPOINT_URL + '''/today''').json()
def random_quotes():
    return requests.get(API_ENDPOINT_URL + '''/random''').json()
if __name__ == "__main__":
    response = random_quotes()
pprint.pprint(response)
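    # The response schema ("q" = quote text, "a" = author) is an assumption about the
    # public ZenQuotes API, not part of the original snippet:
    for quote in response:
        print(f"{quote.get('q', '?')} - {quote.get('a', 'unknown')}")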
| 167 | 1 |
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
logger = logging.get_logger(__name__)
class MCTCTFeatureExtractor(SequenceFeatureExtractor):
    '''simple docstring'''
    model_input_names = ["""input_features""", """attention_mask"""]
    def __init__(
        self,
        feature_size=80,
        sampling_rate=16_000,
        padding_value=0.0,
        hop_length=10,
        win_length=25,
        win_function="hamming_window",
        frame_signal_scale=32_768.0,
        preemphasis_coeff=0.97,
        mel_floor=1.0,
        normalize_means=True,
        normalize_vars=True,
        return_attention_mask=False,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.hop_length = hop_length
        self.win_length = win_length
        self.frame_signal_scale = frame_signal_scale
        self.preemphasis_coeff = preemphasis_coeff
        self.mel_floor = mel_floor
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.win_function = win_function
        self.return_attention_mask = return_attention_mask
        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1
    def _extract_mfsc_features(self, one_waveform) -> np.ndarray:
        """simple docstring"""
        if self.win_function == "hamming_window":
            window = window_function(window_length=self.sample_size, name=self.win_function, periodic=False)
        else:
            window = window_function(window_length=self.sample_size, name=self.win_function)
        fbanks = mel_filter_bank(
            num_frequency_bins=self.n_freqs, num_mel_filters=self.feature_size, min_frequency=0.0, max_frequency=self.sampling_rate / 2.0, sampling_rate=self.sampling_rate, )
        msfc_features = spectrogram(
            one_waveform * self.frame_signal_scale, window=window, frame_length=self.sample_size, hop_length=self.sample_stride, fft_length=self.n_fft, center=False, preemphasis=self.preemphasis_coeff, mel_filters=fbanks, mel_floor=self.mel_floor, log_mel='''log''', )
        return msfc_features.T
    def _normalize_one(self, x, input_length, padding_value):
        """simple docstring"""
        if self.normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if self.normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)
        if input_length < x.shape[0]:
            x[input_length:] = padding_value
        # make sure array is in float32
        x = x.astype(np.float32)
        return x
    def normalize(self, input_features, attention_mask=None) -> List[np.ndarray]:
        """simple docstring"""
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(x, n, self.padding_value) for x, n in zip(input_features, lengths)]
    def __call__(
        self,
        raw_speech,
        padding=False,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_attention_mask=None,
        return_tensors=None,
        sampling_rate=None,
        **kwargs,
    ) -> BatchFeature:
        """simple docstring"""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
                    f""" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"""
                    f""" {self.sampling_rate} and not {sampling_rate}.""")
        else:
            logger.warning(
                '''It is strongly recommended to pass the ``sampling_rate`` argument to this function. '''
                '''Failing to do so can result in silent errors that might be hard to debug.''')
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"""Only mono-channel audio is supported for input to {self}""")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]
        # extract fbank features
        features = [self._extract_mfsc_features(one_waveform) for one_waveform in raw_speech]
        # convert into correct format for padding
        encoded_inputs = BatchFeature({'''input_features''': features})
        # the attention mask is requested here because per-example lengths are needed below
        padded_inputs = self.pad(
            encoded_inputs, padding=padding, max_length=max_length, truncation=truncation, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=True, **kwargs, )
        # make sure list is in array format
        input_features = padded_inputs.get('''input_features''')
        if isinstance(input_features[0], list):
            padded_inputs['''input_features'''] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        attention_mask = padded_inputs.get('''attention_mask''')
        if attention_mask is not None:
            padded_inputs['''attention_mask'''] = [np.asarray(array, dtype=np.int32) for array in attention_mask]
        if self.normalize_means or self.normalize_vars:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            padded_inputs['''input_features'''] = self.normalize(
                padded_inputs['''input_features'''], attention_mask=attention_mask)
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        return padded_inputs
| 129 |
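# Usage sketch for the feature extractor above (a minimal sketch assuming the class
# name repaired above; the audio is fake data, not a real recording):
import numpy as np
extractor = MCTCTFeatureExtractor(feature_size=80, sampling_rate=16_000, padding_value=0.0)
speech = np.random.randn(16_000).astype(np.float32) * 0.1  # one second of 16 kHz noise
batch = extractor(speech, sampling_rate=16_000, padding="longest", return_tensors="np")
print(batch["input_features"].shape)  # (1, num_frames, 80)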
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name):
    '''simple docstring'''
    # maps original GroupViT checkpoint key names to their HF Transformers equivalents
    if "img_encoder.pos_embed" in name:
        name = name.replace('''img_encoder.pos_embed''' ,'''vision_model.embeddings.position_embeddings''')
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace('''img_encoder.patch_embed.proj''' ,'''vision_model.embeddings.patch_embeddings.projection''')
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace('''img_encoder.patch_embed.norm''' ,'''vision_model.embeddings.layernorm''')
    if "img_encoder.layers" in name:
        name = name.replace('''img_encoder.layers''' ,'''vision_model.encoder.stages''')
    if "blocks" in name and "res" not in name:
        name = name.replace('''blocks''' ,'''layers''')
    if "attn" in name and "pre_assign" not in name:
        name = name.replace('''attn''' ,'''self_attn''')
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace('''proj''' ,'''out_proj''')
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace('''pre_assign_attn.attn.proj''' ,'''pre_assign_attn.attn.out_proj''')
    if "norm1" in name:
        name = name.replace('''norm1''' ,'''layer_norm1''')
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace('''norm2''' ,'''layer_norm2''')
    if "img_encoder.norm" in name:
        name = name.replace('''img_encoder.norm''' ,'''vision_model.layernorm''')
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace('''text_encoder.token_embedding''' ,'''text_model.embeddings.token_embedding''')
    if "text_encoder.positional_embedding" in name:
        name = name.replace('''text_encoder.positional_embedding''' ,'''text_model.embeddings.position_embedding.weight''')
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace('''text_encoder.transformer.resblocks.''' ,'''text_model.encoder.layers.''')
    if "ln_1" in name:
        name = name.replace('''ln_1''' ,'''layer_norm1''')
    if "ln_2" in name:
        name = name.replace('''ln_2''' ,'''layer_norm2''')
    if "c_fc" in name:
        name = name.replace('''c_fc''' ,'''fc1''')
    if "c_proj" in name:
        name = name.replace('''c_proj''' ,'''fc2''')
    if "text_encoder" in name:
        name = name.replace('''text_encoder''' ,'''text_model''')
    if "ln_final" in name:
        name = name.replace('''ln_final''' ,'''final_layer_norm''')
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace('''img_projector.linear_hidden.''' ,'''visual_projection.''')
    if "img_projector.linear_out." in name:
        name = name.replace('''img_projector.linear_out.''' ,'''visual_projection.3.''')
    if "text_projector.linear_hidden" in name:
        name = name.replace('''text_projector.linear_hidden''' ,'''text_projection''')
    if "text_projector.linear_out" in name:
        name = name.replace('''text_projector.linear_out''' ,'''text_projection.3''')
    return name
def convert_state_dict(orig_state_dict, config):
    '''simple docstring'''
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split('''.''')
            stage_num, layer_num = int(key_split[2]), int(key_split[4])
            dim = config.vision_config.hidden_size
            # target key paths reconstructed from the rename scheme in rename_key above
            prefix = f"""vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn"""
            if "weight" in key:
                orig_state_dict[f"""{prefix}.q_proj.weight"""] = val[:dim, :]
                orig_state_dict[f"""{prefix}.k_proj.weight"""] = val[dim : dim * 2, :]
                orig_state_dict[f"""{prefix}.v_proj.weight"""] = val[-dim:, :]
            else:
                orig_state_dict[f"""{prefix}.q_proj.bias"""] = val[:dim]
                orig_state_dict[f"""{prefix}.k_proj.bias"""] = val[dim : dim * 2]
                orig_state_dict[f"""{prefix}.v_proj.bias"""] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split('''.''')
            layer_num = int(key_split[3])
            dim = config.text_config.hidden_size
            # target key paths reconstructed from the rename scheme in rename_key above
            prefix = f"""text_model.encoder.layers.{layer_num}.self_attn"""
            if "weight" in key:
                orig_state_dict[f"""{prefix}.q_proj.weight"""] = val[:dim, :]
                orig_state_dict[f"""{prefix}.k_proj.weight"""] = val[dim : dim * 2, :]
                orig_state_dict[f"""{prefix}.v_proj.weight"""] = val[-dim:, :]
            else:
                orig_state_dict[f"""{prefix}.q_proj.bias"""] = val[:dim]
                orig_state_dict[f"""{prefix}.k_proj.bias"""] = val[dim : dim * 2]
                orig_state_dict[f"""{prefix}.v_proj.bias"""] = val[-dim:]
        else:
            new_name = rename_key(key)
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val
    return orig_state_dict
def prepare_img():
    '''simple docstring'''
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_groupvit_checkpoint(checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False):
    '''simple docstring'''
    config = GroupViTConfig()
    model = GroupViTModel(config).eval()
    state_dict = torch.load(checkpoint_path, map_location='''cpu''')['''model''']
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0)
    # verify result
    processor = CLIPProcessor.from_pretrained('''openai/clip-vit-base-patch32''')
    image = prepare_img()
    inputs = processor(text=['''a photo of a cat''', '''a photo of a dog'''], images=image, padding=True, return_tensors='''pt''')
    with torch.no_grad():
        outputs = model(**inputs)
    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3523, 6.3629]])
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1873, 8.6230]])
    else:
        raise ValueError(f"""Model name {model_name} not supported.""")
    assert torch.allclose(outputs.logits_per_image, expected_logits, atol=1E-3)
    processor.save_pretrained(pytorch_dump_folder_path)
    model.save_pretrained(pytorch_dump_folder_path)
    print('''Successfully saved processor and model to''', pytorch_dump_folder_path)
    if push_to_hub:
        print('''Pushing to the hub...''')
        processor.push_to_hub(model_name, organization='''nielsr''')
        model.push_to_hub(model_name, organization='''nielsr''')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to dump the processor and PyTorch model.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to GroupViT checkpoint')
parser.add_argument(
'--model_name',
default='groupvit-gccy-fcc',
type=str,
help='Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.',
)
    args = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 129 | 1 |
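# Illustration of the fused-qkv split performed in convert_state_dict above (toy
# dimensions, independent of GroupViT itself): a fused projection of shape (3*dim, dim)
# is cut into equal query/key/value blocks.
import numpy as np
dim = 4
qkv = np.arange(3 * dim * dim).reshape(3 * dim, dim)
q, k, v = qkv[:dim, :], qkv[dim : dim * 2, :], qkv[-dim:, :]
assert q.shape == k.shape == v.shape == (dim, dim)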
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_distilbert': [
'DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'DistilBertConfig',
'DistilBertOnnxConfig',
],
'tokenization_distilbert': ['DistilBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_distilbert_fast'] = ['DistilBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_distilbert'] = [
'DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DistilBertForMaskedLM',
'DistilBertForMultipleChoice',
'DistilBertForQuestionAnswering',
'DistilBertForSequenceClassification',
'DistilBertForTokenClassification',
'DistilBertModel',
'DistilBertPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_distilbert'] = [
'TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDistilBertForMaskedLM',
'TFDistilBertForMultipleChoice',
'TFDistilBertForQuestionAnswering',
'TFDistilBertForSequenceClassification',
'TFDistilBertForTokenClassification',
'TFDistilBertMainLayer',
'TFDistilBertModel',
'TFDistilBertPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_distilbert'] = [
'FlaxDistilBertForMaskedLM',
'FlaxDistilBertForMultipleChoice',
'FlaxDistilBertForQuestionAnswering',
'FlaxDistilBertForSequenceClassification',
'FlaxDistilBertForTokenClassification',
'FlaxDistilBertModel',
'FlaxDistilBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 353 |
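# Minimal sketch of the lazy-import pattern used by the module above (simplified;
# the real _LazyModule in transformers.utils has extra machinery for TYPE_CHECKING,
# pickling and error reporting):
import importlib
import types

class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_submodule = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        # only called when the attribute is not yet present; import on first access
        if attr not in self._attr_to_submodule:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module("." + self._attr_to_submodule[attr], self.__name__)
        return getattr(submodule, attr)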
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class __A :
'''simple docstring'''
__lowercase: Optional[str] = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Model name or path of model to be trained."""})
__lowercase: Optional[str] = field(
default="""./""" , metadata={"""help""": """Save dir where model repo is cloned and models updates are saved to."""})
__lowercase: Optional[str] = field(
default="""codeparrot/codeparrot-clean-train""" , metadata={"""help""": """Name or path of training dataset."""})
__lowercase: Optional[str] = field(
default="""codeparrot/codeparrot-clean-valid""" , metadata={"""help""": """Name or path of validation dataset."""})
__lowercase: Optional[int] = field(default=2 , metadata={"""help""": """Batch size for training."""})
__lowercase: Optional[int] = field(default=2 , metadata={"""help""": """Batch size for evaluation."""})
__lowercase: Optional[float] = field(default=0.1 , metadata={"""help""": """Value of weight decay."""})
__lowercase: Optional[int] = field(
default=1_00_00 , metadata={"""help""": """Size of buffer used to shuffle streaming dataset."""})
    __lowercase: Optional[float] = field(default=2E-4 , metadata={"""help""": """Learning rate for training."""})
    __lowercase: Optional[str] = field(default="""cosine""" , metadata={"""help""": """Learning rate scheduler type."""})
__lowercase: Optional[int] = field(
default=7_50 , metadata={"""help""": """Number of warmup steps in the learning rate schedule."""})
__lowercase: Optional[int] = field(
default=16 , metadata={"""help""": """Number of gradient accumulation steps."""})
__lowercase: Optional[bool] = field(
default=snake_case__ , metadata={"""help""": """Use gradient checkpointing to reduce memory footprint."""})
__lowercase: Optional[int] = field(default=5_00_00 , metadata={"""help""": """Maximum number of training steps."""})
__lowercase: Optional[int] = field(
default=-1 , metadata={"""help""": """Maximum number of evaluation steps. If -1 the full dataset is evaluated."""})
__lowercase: Optional[int] = field(default=10_24 , metadata={"""help""": """Sequence lengths used for training."""})
__lowercase: Optional[int] = field(default=1 , metadata={"""help""": """Training seed."""})
__lowercase: Optional[int] = field(
default=10_24 , metadata={"""help""": """Interval to save checkpoints. Measured as number of forward passes not training steps."""} , )
__lowercase: Optional[str] = field(
default=snake_case__ , metadata={"""help""": """States path if the training should continue from a checkpoint folder."""})
__lowercase: Optional[bool] = field(default=snake_case__ , metadata={"""help""": """If True the data is pretokenized."""})
@dataclass
class __A :
'''simple docstring'''
__lowercase: Optional[str] = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Model name or path of model to be evaluated."""})
__lowercase: Optional[str] = field(
default="""codeparrot/codeparrot-clean-valid""" , metadata={"""help""": """Name or path of validation dataset."""})
__lowercase: Optional[int] = field(default=2 , metadata={"""help""": """Batch size used for evaluation."""})
__lowercase: Optional[int] = field(
default=-1 , metadata={"""help""": """Maximum number of evaluation steps. If -1 the full dataset is evaluated."""})
__lowercase: Optional[int] = field(default=10_24 , metadata={"""help""": """Length of sequences to be evaluated."""})
__lowercase: Optional[int] = field(default=1 , metadata={"""help""": """Random seed used for evaluation."""})
@dataclass
class __A :
'''simple docstring'''
__lowercase: Optional[str] = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Model name or path of model to be evaluated."""})
__lowercase: Optional[int] = field(default=snake_case__ , metadata={"""help""": """Number of workers used for code evaluation."""})
__lowercase: Optional[int] = field(
default=snake_case__ , metadata={"""help""": """The number of human-eval tasks to run. If not included all tasks are evaluated."""} , )
__lowercase: Optional[bool] = field(
default=snake_case__ , metadata={"""help""": """Sample from the language model's output distribution."""})
__lowercase: Optional[float] = field(default=0.2 , metadata={"""help""": """Sampling temperature used for generation."""})
__lowercase: Optional[int] = field(default=2_56 , metadata={"""help""": """Maximum number of newly generated tokens."""})
__lowercase: Optional[int] = field(default=0 , metadata={"""help""": """Top-k parameter used for generation."""})
__lowercase: Optional[float] = field(default=0.9_5 , metadata={"""help""": """Top-p parameter used for nucleus sampling."""})
__lowercase: Optional[int] = field(default=10 , metadata={"""help""": """Number of generations to run in parallel."""})
__lowercase: Optional[int] = field(
default=2_00 , metadata={"""help""": """Number of completions to generate for each sample."""})
__lowercase: Optional[int] = field(default=1 , metadata={"""help""": """Random seed used for evaluation."""})
    __lowercase: Optional[str] = field(
        default="""eval_results.json""" , metadata={"""help""": """Name of the output file where evaluation results are saved."""})
__lowercase: Optional[str] = field(
default="""0""" , metadata={"""help""": """Allow `code_eval` to execute Python code on machine"""})
__lowercase: Optional[int] = field(
default=-1 , metadata={
"""help""": (
"""Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive"""
""" number corresponds to which GPU device id to run on."""
)
} , )
@dataclass
class __A :
'''simple docstring'''
__lowercase: Optional[int] = field(
default=snake_case__ , metadata={
"""help""": """The number of CPU cores to use for parallel preprocessing. Default uses the maximum available."""
} , )
__lowercase: Optional[str] = field(
default="""transformersbook/codeparrot""" , metadata={"""help""": """Folder or name of dataset to process."""})
__lowercase: Optional[str] = field(
default="""codeparrot-clean""" , metadata={"""help""": """Folder to save processed processed dataset."""})
__lowercase: Optional[int] = field(
default=10_00_00 , metadata={"""help""": """Number of files to save per JSON output file."""})
__lowercase: Optional[str] = field(default="""content""" , metadata={"""help""": """Column containing text data to process."""})
__lowercase: Optional[float] = field(
default=10_00 , metadata={"""help""": """Maximum line length in file, otherwise file is filtered."""})
__lowercase: Optional[float] = field(
default=1_00 , metadata={"""help""": """Maximum mean line length in file, otherwise file is filtered."""})
__lowercase: Optional[float] = field(
default=0.2_5 , metadata={"""help""": """Maximum fraction of non-alphanumeric characters, otherwise file is filtered."""})
__lowercase: Optional[float] = field(
default=1.5 , metadata={"""help""": """Minimum character token ratio for the file, otherwise file is filtered."""})
__lowercase: Optional[float] = field(
default=0.7 , metadata={"""help""": """Probability for filtering config, test and uncommon files."""})
__lowercase: Optional[str] = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Name or path to the tokenizer."""} , )
__lowercase: Optional[bool] = field(
default=snake_case__ , metadata={"""help""": """If True, near-duplicate samples are removed."""})
__lowercase: Optional[float] = field(
default=0.8_5 , metadata={"""help""": """Jaccard threshold for near-duplicate samples."""})
@dataclass
class __A :
'''simple docstring'''
__lowercase: Optional[str] = field(
default="""gpt2""" , metadata={"""help""": """Base tokenizer to build new tokenizer from."""})
__lowercase: Optional[str] = field(
default="""transformersbook/codeparrot-train""" , metadata={"""help""": """Dataset to train tokenizer on."""})
__lowercase: Optional[str] = field(default="""content""" , metadata={"""help""": """Column containing text data to process."""})
__lowercase: Optional[int] = field(default=20_00_00 , metadata={"""help""": """Number of examples to train tokenizer on."""})
__lowercase: Optional[int] = field(
        default=3_27_68 , metadata={"""help""": """Vocabulary size of the new tokenizer."""})
__lowercase: Optional[str] = field(default="""codeparrot""" , metadata={"""help""": """Name of new tokenizer."""})
__lowercase: Optional[bool] = field(default=snake_case__ , metadata={"""help""": """Push saved tokenizer to the hub."""})
@dataclass
class __A :
'''simple docstring'''
__lowercase: Optional[str] = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Name or path to the tokenizer."""})
__lowercase: Optional[str] = field(
default="""codeparrot/codeparrot-clean-train""" , metadata={"""help""": """Name or path to the dataset to pretokenize."""})
__lowercase: Optional[str] = field(
default="""tokenized-codeparrot-train""" , metadata={"""help""": """Repo name of the pretokenized data."""})
    __lowercase: Optional[int] = field(default=snake_case__ , metadata={"""help""": """Number of workers used for pretokenization."""})
@dataclass
class __A :
'''simple docstring'''
__lowercase: Optional[str] = field(
default="""gpt2-large""" , metadata={"""help""": """Configuration to use for model initialization."""})
__lowercase: Optional[str] = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Tokenizer attached to model."""})
__lowercase: Optional[str] = field(default="""codeparrot""" , metadata={"""help""": """Name of the created model."""})
__lowercase: Optional[bool] = field(default=snake_case__ , metadata={"""help""": """Push saved tokenizer to the hub."""})
| 233 | 0 |
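# Sketch of how dataclasses like those above are typically consumed via HfArgumentParser
# (the standard transformers helper). Note that in this dump every dataclass is named
# __A, so only the last definition is still bound at runtime.
from transformers import HfArgumentParser
parser = HfArgumentParser(__A)
(args,) = parser.parse_args_into_dataclasses()
print(args)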
'''simple docstring'''
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray) -> bool:
    '''simple docstring'''
    return np.array_equal(matrix, matrix.conjugate().T)
def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    '''simple docstring'''
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))
def tests() -> None:
    '''simple docstring'''
    a = np.array([[2, 2 + 1J, 4], [2 - 1J, 3, 1J], [4, -1J, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f'''{a} is not hermitian.'''
    print(rayleigh_quotient(a, v))
    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f'''{a} is not hermitian.'''
    assert rayleigh_quotient(a, v) == float(3)
if __name__ == "__main__":
import doctest
doctest.testmod()
tests() | 97 |
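# Property check for the Rayleigh quotient above: for a Hermitian matrix it always lies
# between the smallest and largest eigenvalues (a sketch with hypothetical data):
import numpy as np
a = np.array([[2.0, 0.0], [0.0, 5.0]])
v = np.array([[1.0], [1.0]])
r = float((v.T @ a @ v) / (v.T @ v))  # (2 + 5) / 2 = 3.5
eigs = np.linalg.eigvalsh(a)
assert eigs.min() <= r <= eigs.max()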
'''simple docstring'''
MOD_ADLER = 65521
def adler32(plain_text: str) -> int:
    '''simple docstring'''
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a | 97 | 1 |
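# Cross-check of the Adler-32 implementation above against zlib (the expected value
# 300286872 is the upstream doctest's reference for "Wikipedia"):
import zlib
assert adler32("Wikipedia") == zlib.adler32(b"Wikipedia") == 300286872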
'''simple docstring'''
def euclidean_gcd(a: int, b: int) -> int:
    while b:
        a, b = b, a % b
    return a
def euclidean_gcd_recursive(a: int, b: int) -> int:
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)
def main():
print(F"""euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}""" )
print(F"""euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}""" )
print(F"""euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}""" )
print(F"""euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}""" )
print(F"""euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}""" )
print(F"""euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}""" )
print(F"""euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}""" )
print(F"""euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}""" )
print(F"""euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}""" )
print(F"""euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}""" )
if __name__ == "__main__":
main()
| 294 |
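Both helpers should agree with math.gcd for non-negative integers; a quick property check (a sketch added for illustration):
import math

for x, y in [(252, 105), (0, 7), (13, 13), (270, 192)]:
    assert euclidean_gcd(x, y) == euclidean_gcd_recursive(x, y) == math.gcd(x, y)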
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , apply_ocr=True , ):
        '''simple docstring'''
        size = size if size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr
    def prepare_image_processor_dict( self ):
        '''simple docstring'''
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None
    def setUp( self ):
        '''simple docstring'''
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self )
    @property
    def image_processor_dict( self ):
        '''simple docstring'''
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , 'do_resize' ) )
        self.assertTrue(hasattr(image_processing , 'size' ) )
        self.assertTrue(hasattr(image_processing , 'apply_ocr' ) )
    def test_image_processor_from_dict_with_kwargs( self ):
        '''simple docstring'''
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'height': 18, 'width': 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
    def test_batch_feature( self ):
        '''simple docstring'''
        pass
    def test_call_pil( self ):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoding = image_processing(image_inputs[0] , return_tensors='pt' )
        self.assertEqual(
            encoding.pixel_values.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )
        self.assertIsInstance(encoding.words , list )
        self.assertIsInstance(encoding.boxes , list )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )
    def test_call_numpy( self ):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )
    def test_call_pytorch( self ):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )
    def test_layoutlmv3_integration_test( self ):
        '''simple docstring'''
        image_processing = LayoutLMvaImageProcessor()
        from datasets import load_dataset
        ds = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' )
        image = Image.open(ds[0]['file'] ).convert('RGB' )
        encoding = image_processing(image , return_tensors='pt' )
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
        self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
__a : Optional[Any] = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
__a : Union[str, Any] = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 
803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , __a )
self.assertListEqual(encoding.boxes , __a )
        # with apply_OCR = False
        image_processing = LayoutLMvaImageProcessor(apply_ocr=False )
        encoding = image_processing(image , return_tensors='pt' )
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
 | 294 | 1 |
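A minimal usage sketch for the image processor exercised above (assumes pytesseract is installed for OCR; the input path is illustrative):
from PIL import Image
from transformers import LayoutLMv3ImageProcessor

image_processor = LayoutLMv3ImageProcessor(apply_ocr=True)  # OCR adds `words` and `boxes` to the encoding
image = Image.open("document.png").convert("RGB")
encoding = image_processor(image, return_tensors="pt")
print(encoding.pixel_values.shape)  # (1, 3, 224, 224) with the default size
print(encoding.words, encoding.boxes)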
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8  # flag marking arrow-key codes so they do not collide with printable characters
KEYMAP = {
"""tab""": ord("""\t"""),
"""newline""": ord("""\r"""),
"""esc""": 2_7,
"""up""": 6_5 + ARROW_KEY_FLAG,
"""down""": 6_6 + ARROW_KEY_FLAG,
"""right""": 6_7 + ARROW_KEY_FLAG,
"""left""": 6_8 + ARROW_KEY_FLAG,
"""mod_int""": 9_1,
"""undefined""": sys.maxsize,
"""interrupt""": 3,
"""insert""": 5_0,
"""delete""": 5_1,
"""pg_up""": 5_3,
"""pg_down""": 5_4,
}
KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]
if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
        b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
        b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
    }
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    """Gets raw characters from the keyboard."""
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                cha = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[cha])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = cha[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch
def get_character():
    """Gets a character from the keyboard and returns the key code."""
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
| 99 |
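A sketch of how these helpers are typically consumed by a terminal menu (an illustrative loop, not part of the original module):
def wait_for_arrow_key():
    # Block until the user presses an arrow key, then report its direction.
    while True:
        key = get_character()
        if isinstance(key, str) and ord(key[0]) >= ARROW_KEY_FLAG:
            return {KEYMAP["up"]: "up", KEYMAP["down"]: "down", KEYMAP["right"]: "right", KEYMAP["left"]: "left"}[ord(key[0])]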
def mf_knapsack(i: int, wt: list, val: list, j: int ):
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val_ = mf_knapsack(i - 1 , wt , val , j )
        else:
            val_ = max(
                mf_knapsack(i - 1 , wt , val , j ) , mf_knapsack(i - 1 , wt , val , j - wt[i - 1] ) + val[i - 1] , )
        f[i][j] = val_
    return f[i][j]
def knapsack(w: int, wt: list, val: list, n: int ):
    dp = [[0] * (w + 1) for _ in range(n + 1 )]
    for i in range(1 , n + 1 ):
        for w_ in range(1 , w + 1 ):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] , dp[i - 1][w_] )
            else:
                dp[i][w_] = dp[i - 1][w_]
    return dp[n][w_], dp
def knapsack_with_example_solution(w: int, wt: list, val: list ):
    if not (isinstance(wt , (list, tuple) ) and isinstance(val , (list, tuple) )):
        raise ValueError(
            'Both the weights and values vectors must be either lists or tuples' )
    num_items = len(wt )
    if num_items != len(val ):
        msg = (
            'The number of weights must be the same as the number of values.\n'
            F'''But got {num_items} weights and {len(val )} values'''
        )
        raise ValueError(msg )
    for i in range(num_items ):
        if not isinstance(wt[i] , int ):
            msg = (
                'All weights must be integers but got weight of '
                F'''type {type(wt[i] )} at index {i}'''
            )
            raise TypeError(msg )
    optimal_val, dp_table = knapsack(w , wt , val , num_items )
    example_optional_set: set = set()
    _construct_solution(dp_table , wt , num_items , w , example_optional_set )
    return optimal_val, example_optional_set
def _construct_solution(dp: list, wt: list, i: int, j: int, optimal_set: set ):
    # for the current item i at a maximum weight j to be part of an optimal subset,
    # the optimal value at (i, j) must be greater than the optimal value at (i-1, j).
    # where i - 1 means considering only the previous items at the given maximum weight
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp , wt , i - 1 , j , optimal_set )
        else:
            optimal_set.add(i )
            _construct_solution(dp , wt , i - 1 , j - wt[i - 1] , optimal_set )
if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w
    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("optimal_value = ", optimal_solution)
    print("An optimal subset corresponding to the optimal value", optimal_subset)
| 277 | 0 |
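Because the instance is tiny, the DP answer is easy to cross-check against an exhaustive search over all item subsets (a sketch added for illustration):
from itertools import combinations

def brute_force_knapsack(capacity, weights, values):
    best = 0
    for r in range(len(weights) + 1):
        for combo in combinations(range(len(weights)), r):
            if sum(weights[i] for i in combo) <= capacity:
                best = max(best, sum(values[i] for i in combo))
    return best

assert brute_force_knapsack(6, [4, 3, 2, 3], [3, 2, 4, 4]) == 8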
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def lowerCamelCase__ (__lowerCamelCase="" ):
_SCREAMING_SNAKE_CASE : Any = tempfile.mkdtemp()
return os.path.join(__lowerCamelCase, str(uuid.uuida() ) + suffix )
@require_soundfile
@require_torch
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
    def test_from_tensor( self ) -> Any:
        tensor = torch.rand(12 , dtype=torch.float64 ) - 0.5
        agent_type = AgentAudio(tensor )
        path = str(agent_type.to_string() )
        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor , agent_type.to_raw() , atol=1E-4 ) )
        del agent_type
        # Ensure the path remains even after the object deletion
        self.assertTrue(os.path.exists(path ) )
        # Ensure that the file contains the same value as the original tensor
        new_tensor, _ = sf.read(path )
        self.assertTrue(torch.allclose(tensor , torch.tensor(new_tensor ) , atol=1E-4 ) )
    def test_from_string( self ) -> int:
        tensor = torch.rand(12 , dtype=torch.float64 ) - 0.5
        path = get_new_path(suffix=".wav" )
        sf.write(path , tensor , 16000 )
        agent_type = AgentAudio(path )
        self.assertTrue(torch.allclose(tensor , agent_type.to_raw() , atol=1E-4 ) )
        self.assertEqual(agent_type.to_string() , path )
@require_vision
@require_torch
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
    def test_from_tensor( self ) -> Tuple:
        tensor = torch.randint(0 , 256 , (64, 64, 3) )
        agent_type = AgentImage(tensor )
        path = str(agent_type.to_string() )
        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor , agent_type._tensor , atol=1E-4 ) )
        self.assertIsInstance(agent_type.to_raw() , Image.Image )
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path ) )
    def test_from_string( self ) -> List[Any]:
        path = Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png"
        image = Image.open(path )
        agent_type = AgentImage(path )
        self.assertTrue(path.samefile(agent_type.to_string() ) )
        self.assertTrue(image == agent_type.to_raw() )
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path ) )
    def test_from_image( self ) -> Dict:
        path = Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png"
        image = Image.open(path )
        agent_type = AgentImage(image )
        self.assertFalse(path.samefile(agent_type.to_string() ) )
        self.assertTrue(image == agent_type.to_raw() )
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path ) )
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
    def test_from_string( self ) -> Optional[Any]:
        string = "Hey!"
        agent_type = AgentText(string )
        self.assertEqual(string , agent_type.to_string() )
        self.assertEqual(string , agent_type.to_raw() )
        self.assertEqual(string , agent_type )
 | 325 |
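The round-trip these tests rely on, sketched outside unittest (requires torch and soundfile; names follow the tests above):
import torch
from transformers.tools.agent_types import AgentAudio

audio = torch.rand(12, dtype=torch.float64) - 0.5
agent_audio = AgentAudio(audio)
print(agent_audio.to_string())          # path of the temporary .wav serialization
assert torch.allclose(audio, agent_audio.to_raw(), atol=1e-4)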
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float ) -> int:
    if depth < 0:
        raise ValueError("Depth cannot be less than 0" )
    if len(scores ) == 0:
        raise ValueError("Scores cannot be empty" )
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height ), minimax(depth + 1, node_index * 2 + 1, False, scores, height ), )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height ), minimax(depth + 1, node_index * 2 + 1, True, scores, height ), )
def main():
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores ), 2 )
    print("Optimal value : ", end="" )
    print(minimax(0, 0, True, scores, height ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main() | 325 | 1 |
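For the score list above the tree has eight leaves and height log2(8) = 3. The depth-2 max layer reduces the leaf pairs to [90, 33, 65, 34423], the depth-1 min layer reduces that to [33, 65], and the root max picks 65, so the script prints "Optimal value : 65". The same value can be checked directly:
assert minimax(0, 0, True, [90, 23, 6, 33, 21, 65, 123, 34423], 3) == 65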
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''yjernite/retribert-base-uncased''': 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
'''yjernite/retribert-base-uncased''': {'''do_lower_case''': True},
}
class RetriBertTokenizerFast( PreTrainedTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs, ):
        '''simple docstring'''
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("""lowercase""", do_lower_case ) != do_lower_case
            or normalizer_state.get("""strip_accents""", strip_accents ) != strip_accents
            or normalizer_state.get("""handle_chinese_chars""", tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("""type""" ) )
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self, token_ids_0, token_ids_1=None ):
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self, save_directory: str, filename_prefix: Optional[str] = None ):
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory, name=filename_prefix )
        return tuple(files )
| 64 |
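A usage sketch for the fast tokenizer defined above (assumes the yjernite/retribert-base-uncased checkpoint referenced in the maps is still downloadable):
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
enc = tokenizer("How are you?", "I am fine.")
print(enc["input_ids"])
print(enc["token_type_ids"])  # 0s for the first segment, 1s for the second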
"""simple docstring"""
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class DonutProcessor( ProcessorMixin ):
    '''simple docstring'''
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"
    def __init__( self, image_processor=None, tokenizer=None, **kwargs ):
        '''simple docstring'''
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
                """ instead.""", FutureWarning, )
            feature_extractor = kwargs.pop("""feature_extractor""" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("""You need to specify an `image_processor`.""" )
        if tokenizer is None:
            raise ValueError("""You need to specify a `tokenizer`.""" )
        super().__init__(image_processor, tokenizer )
        self.current_processor = self.image_processor
        self._in_target_context_manager = False
    def __call__( self, *args, **kwargs ):
        '''simple docstring'''
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs )
        images = kwargs.pop("""images""", None )
        text = kwargs.pop("""text""", None )
        if len(args ) > 0:
            images = args[0]
            args = args[1:]
        if images is None and text is None:
            raise ValueError("""You need to specify either an `images` or `text` input to process.""" )
        if images is not None:
            inputs = self.image_processor(images, *args, **kwargs )
        if text is not None:
            encodings = self.tokenizer(text, **kwargs )
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["""input_ids"""]
            return inputs
    def batch_decode( self, *args, **kwargs ):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args, **kwargs )
    def decode( self, *args, **kwargs ):
        '''simple docstring'''
        return self.tokenizer.decode(*args, **kwargs )
    @contextmanager
    def as_target_processor( self ):
        '''simple docstring'''
        warnings.warn(
            """`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
            """labels by using the argument `text` of the regular `__call__` method (either in the same call as """
            """your images inputs, or in a separate call.""" )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False
    def token2json( self, tokens, is_inner_value=False, added_vocab=None ):
        '''simple docstring'''
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()
        output = {}
        while tokens:
            start_token = re.search(r"""<s_(.*?)>""", tokens, re.IGNORECASE )
            if start_token is None:
                break
            key = start_token.group(1 )
            end_token = re.search(rf"</s_{key}>", tokens, re.IGNORECASE )
            start_token = start_token.group()
            if end_token is None:
                tokens = tokens.replace(start_token, """""" )
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token )
                end_token_escaped = re.escape(end_token )
                content = re.search(f"{start_token_escaped}(.*?){end_token_escaped}", tokens, re.IGNORECASE )
                if content is not None:
                    content = content.group(1 ).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value = self.token2json(content, is_inner_value=True, added_vocab=added_vocab )
                        if value:
                            if len(value ) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(r"""<sep/>""" ):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf )
                        if len(output[key] ) == 1:
                            output[key] = output[key][0]
                tokens = tokens[tokens.find(end_token ) + len(end_token ) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.token2json(tokens[6:], is_inner_value=True, added_vocab=added_vocab )
        if len(output ):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}
    @property
    def feature_extractor_class( self ):
        '''simple docstring'''
        warnings.warn(
            """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""", FutureWarning, )
        return self.image_processor_class
    @property
    def feature_extractor( self ):
        '''simple docstring'''
        warnings.warn(
            """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""", FutureWarning, )
        return self.image_processor
| 64 | 1 |
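A rough illustration of what token2json produces for a Donut-style tag sequence (the input string is invented for the example):
seq = "<s_menu><s_name>Latte<sep/>Mocha</s_name><s_price>4.50</s_price></s_menu>"
result = processor.token2json(seq)
# expected shape of the result:
# {"menu": {"name": ["Latte", "Mocha"], "price": "4.50"}}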
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {'''configuration_glpn''': ['''GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GLPNConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_glpn'''] = ['''GLPNFeatureExtractor''']
    _import_structure['''image_processing_glpn'''] = ['''GLPNImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_glpn'''] = [
        '''GLPN_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''GLPNForDepthEstimation''',
        '''GLPNLayer''',
        '''GLPNModel''',
        '''GLPNPreTrainedModel''',
    ]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 358 |
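The module above defers its heavy imports until an attribute is first accessed. A stripped-down sketch of the same idea using module-level __getattr__ (PEP 562), independent of transformers' _LazyModule:
import importlib

_import_structure = {"math": ["sqrt"]}  # providing module -> exported symbols, illustrative

def __getattr__(name):
    for module_name, symbols in _import_structure.items():
        if name in symbols:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")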
"""simple docstring"""
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length ):
    """simple docstring"""
    if isinstance(padding_value, tuple ):
        out_tensor = np.full((len(sequences ), sequence_length, 2), padding_value )
    else:
        out_tensor = np.full((len(sequences ), sequence_length), padding_value )
    for i, tensor in enumerate(sequences ):
        if padding_side == "right":
            if isinstance(padding_value, tuple ):
                out_tensor[i, : len(tensor[:sequence_length] ), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length] )] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple ):
                out_tensor[i, -len(tensor[:sequence_length] ) :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, -len(tensor[:sequence_length] ) :] = tensor[:sequence_length]
    return out_tensor.tolist()
def is_punctuation(char ):
    """simple docstring"""
    cp = ord(char )
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char )
    if cat.startswith("""P""" ):
        return True
    return False
@dataclass
class DataCollatorForLukeTokenClassification( DataCollatorMixin ):
    '''simple docstring'''
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"
    def torch_call( self, features ):
        '''simple docstring'''
        import torch
        label_name = """label""" if """label""" in features[0].keys() else """labels"""
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="""pt""" if labels is None else None, )
        if labels is None:
            return batch
        sequence_length = torch.tensor(batch["""entity_ids"""] ).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label ) + [self.label_pad_token_id] * (sequence_length - len(label )) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label )) + list(label ) for label in labels
            ]
        ner_tags = [feature["""ner_tags"""] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length )
        original_entity_spans = [feature["""original_entity_spans"""] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length )
        batch = {k: torch.tensor(v, dtype=torch.int64 ) for k, v in batch.items()}
        return batch
| 132 | 0 |
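A quick illustration of padding_tensor on toy inputs (values chosen for the example):
print(padding_tensor([[1, 2], [3]], -1, "right", 4))
# -> [[1, 2, -1, -1], [3, -1, -1, -1]]
print(padding_tensor([[(1, 2)], [(3, 4), (5, 6)]], (-1, -1), "right", 3))
# -> [[[1, 2], [-1, -1], [-1, -1]], [[3, 4], [5, 6], [-1, -1]]]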
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_mgp_str""": ["""MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MgpstrConfig"""],
"""processing_mgp_str""": ["""MgpstrProcessor"""],
"""tokenization_mgp_str""": ["""MgpstrTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_mgp_str"""] = [
"""MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MgpstrModel""",
"""MgpstrPreTrainedModel""",
"""MgpstrForSceneTextRecognition""",
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 59 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase : Any = logging.get_logger(__name__)
def create_rename_keys(config, has_lm_head=False, is_semantic=False ) -> Any:
    prefix = '''backbone.''' if is_semantic else ''''''
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''{prefix}blocks.{i}.norm1.weight''', f'''beit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm1.bias''', f'''beit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''{prefix}blocks.{i}.attn.proj.weight''', f'''beit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(f'''{prefix}blocks.{i}.attn.proj.bias''', f'''beit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm2.weight''', f'''beit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm2.bias''', f'''beit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.weight''', f'''beit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.bias''', f'''beit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.weight''', f'''beit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.bias''', f'''beit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
(f'''{prefix}cls_token''', '''beit.embeddings.cls_token'''),
(f'''{prefix}patch_embed.proj.weight''', '''beit.embeddings.patch_embeddings.projection.weight'''),
(f'''{prefix}patch_embed.proj.bias''', '''beit.embeddings.patch_embeddings.projection.bias'''),
(f'''{prefix}pos_embed''', '''beit.embeddings.position_embeddings'''),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
('''mask_token''', '''beit.embeddings.mask_token'''),
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
('''fc_norm.weight''', '''beit.pooler.layernorm.weight'''),
('''fc_norm.bias''', '''beit.pooler.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False ) -> Any:
    for i in range(config.num_hidden_layers ):
        prefix = '''backbone.''' if is_semantic else ''''''
        # queries, keys and values
        in_proj_weight = state_dict.pop(f'''{prefix}blocks.{i}.attn.qkv.weight''' )
        q_bias = state_dict.pop(f'''{prefix}blocks.{i}.attn.q_bias''' )
        v_bias = state_dict.pop(f'''{prefix}blocks.{i}.attn.v_bias''' )
        state_dict[f'''beit.encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f'''beit.encoder.layer.{i}.attention.attention.query.bias'''] = q_bias
        state_dict[f'''beit.encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'''beit.encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'''beit.encoder.layer.{i}.attention.attention.value.bias'''] = v_bias
        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_a = state_dict.pop(f'''{prefix}blocks.{i}.gamma_1''' )
        gamma_b = state_dict.pop(f'''{prefix}blocks.{i}.gamma_2''' )
        state_dict[f'''beit.encoder.layer.{i}.lambda_1'''] = gamma_a
        state_dict[f'''beit.encoder.layer.{i}.lambda_2'''] = gamma_b
def rename_key(dct, old, new ) -> Optional[Any]:
    val = dct.pop(old )
    dct[new] = val
def prepare_img() -> Optional[int]:
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url, stream=True ).raw )
    return im
@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False ) -> Union[str, Any]:
    has_lm_head = False if '''rvlcdip''' in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True , use_mask_token=has_lm_head )
    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = '''huggingface/label-files'''
        filename = '''rvlcdip-id2label.json'''
        idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
        idalabel = {int(k ): v for k, v in idalabel.items()}
        config.idalabel = idalabel
        config.labelaid = {v: k for k, v in idalabel.items()}
    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='''cpu''' )['''model''']
    rename_keys = create_rename_keys(config , has_lm_head=has_lm_head )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , has_lm_head=has_lm_head )
    # load HuggingFace model
    model = BeitForMaskedImageModeling(config ) if has_lm_head else BeitForImageClassification(config )
    model.eval()
    model.load_state_dict(state_dict )
    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=False )
    image = prepare_img()
    encoding = image_processor(images=image , return_tensors='''pt''' )
    pixel_values = encoding['''pixel_values''']
    outputs = model(pixel_values )
    logits = outputs.logits
    # verify logits
    expected_shape = [1, 16] if '''rvlcdip''' in checkpoint_url else [1, 196, 8_192]
    assert logits.shape == torch.Size(expected_shape ), "Shape of logits not as expected"
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f'''Saving model to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        if has_lm_head:
            model_name = '''dit-base''' if '''base''' in checkpoint_url else '''dit-large'''
        else:
            model_name = '''dit-base-finetuned-rvlcdip''' if '''dit-b''' in checkpoint_url else '''dit-large-finetuned-rvlcdip'''
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization='''nielsr''' , commit_message='''Add image processor''' , use_temp_dir=True , )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization='''nielsr''' , commit_message='''Add model''' , use_temp_dir=True , )
if __name__ == "__main__":
_lowerCamelCase : Tuple = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
type=str,
help="URL to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
_lowerCamelCase : Optional[int] = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 336 | 0 |
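The converter is driven from the command line; a typical invocation looks like this (the script file name and output path are assumptions, the URL is the script's own default):
python convert_dit_unilm_to_pytorch.py \
    --checkpoint_url "https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth" \
    --pytorch_dump_folder_path ./dit-base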
"""simple docstring"""
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester( unittest.TestCase ):
    """simple docstring"""
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , ) -> Dict:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs( self ) -> Tuple:
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        config = RoFormerConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common( self ) -> int:
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
        return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest( FlaxModelTesterMixin , unittest.TestCase ):
    """simple docstring"""
    test_head_masking = True
    all_model_classes = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp( self ) -> Dict:
        '''simple docstring'''
        self.model_tester = FlaxRoFormerModelTester(self )
    @slow
    def test_model_from_pretrained( self ) -> Union[str, Any]:
        '''simple docstring'''
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('''junnyu/roformer_chinese_small''' , from_pt=True )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
@require_flax
class FlaxRoFormerModelIntegrationTest( unittest.TestCase ):
    """simple docstring"""
    @slow
    def test_inference_masked_lm( self ) -> Dict:
        '''simple docstring'''
        model = FlaxRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' )
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        vocab_size = 50000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape , expected_shape )
        expected_slice = jnp.array(
            [[[-0.12_05, -1.02_65, 0.29_22], [-1.51_34, 0.19_74, 0.15_19], [-5.01_35, -3.90_03, -0.84_04]]] )
        self.assertTrue(jnp.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
 | 361 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}
class AlbertConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = '''albert'''
    def __init__( self , vocab_size=30000 , embedding_size=128 , hidden_size=4096 , num_hidden_layers=12 , num_hidden_groups=1 , num_attention_heads=64 , intermediate_size=16384 , inner_group_num=1 , hidden_act="gelu_new" , hidden_dropout_prob=0 , attention_probs_dropout_prob=0 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , classifier_dropout_prob=0.1 , position_embedding_type="absolute" , pad_token_id=0 , bos_token_id=2 , eos_token_id=3 , **kwargs , ) -> Dict:
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type
class AlbertOnnxConfig( OnnxConfig ):
    """simple docstring"""
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
                ('''token_type_ids''', dynamic_axis),
            ] )
| 85 | 0 |
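A small usage sketch for the config class (the overrides are illustrative; the defaults above correspond to the xxlarge-sized architecture):
config = AlbertConfig(hidden_size=768, num_attention_heads=12, intermediate_size=3072)
print(config.model_type, config.hidden_size)  # albert 768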
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"albert-base-v1": 5_12,
"albert-large-v1": 5_12,
"albert-xlarge-v1": 5_12,
"albert-xxlarge-v1": 5_12,
"albert-base-v2": 5_12,
"albert-large-v2": 5_12,
"albert-xlarge-v2": 5_12,
"albert-xxlarge-v2": 5_12,
}
A_ = "▁"
class _snake_case ( A_ ):
_A : List[Any] = VOCAB_FILES_NAMES
_A : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
_A : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : List[str]=True ,SCREAMING_SNAKE_CASE__ : int=True ,SCREAMING_SNAKE_CASE__ : Tuple=False ,SCREAMING_SNAKE_CASE__ : str="[CLS]" ,SCREAMING_SNAKE_CASE__ : List[str]="[SEP]" ,SCREAMING_SNAKE_CASE__ : List[Any]="<unk>" ,SCREAMING_SNAKE_CASE__ : int="[SEP]" ,SCREAMING_SNAKE_CASE__ : Any="<pad>" ,SCREAMING_SNAKE_CASE__ : Any="[CLS]" ,SCREAMING_SNAKE_CASE__ : Tuple="[MASK]" ,SCREAMING_SNAKE_CASE__ : Optional[Dict[str, Any]] = None ,**SCREAMING_SNAKE_CASE__ : Any ,):
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
SCREAMING_SNAKE_CASE:Optional[int] = (
AddedToken(_lowerCamelCase ,lstrip=_lowerCamelCase ,rstrip=_lowerCamelCase ,normalized=_lowerCamelCase )
if isinstance(_lowerCamelCase ,_lowerCamelCase )
else mask_token
)
SCREAMING_SNAKE_CASE:List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_lowerCamelCase ,remove_space=_lowerCamelCase ,keep_accents=_lowerCamelCase ,bos_token=_lowerCamelCase ,eos_token=_lowerCamelCase ,unk_token=_lowerCamelCase ,sep_token=_lowerCamelCase ,pad_token=_lowerCamelCase ,cls_token=_lowerCamelCase ,mask_token=_lowerCamelCase ,sp_model_kwargs=self.sp_model_kwargs ,**_lowerCamelCase ,)
SCREAMING_SNAKE_CASE:List[str] = do_lower_case
SCREAMING_SNAKE_CASE:List[Any] = remove_space
SCREAMING_SNAKE_CASE:Dict = keep_accents
SCREAMING_SNAKE_CASE:List[str] = vocab_file
SCREAMING_SNAKE_CASE:List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_lowerCamelCase )
@property
def __UpperCamelCase ( self : Dict ):
return len(self.sp_model )
def __UpperCamelCase ( self : str ):
SCREAMING_SNAKE_CASE:str = {self.convert_ids_to_tokens(_lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Tuple ):
SCREAMING_SNAKE_CASE:Dict = self.__dict__.copy()
SCREAMING_SNAKE_CASE:Dict = None
return state
def __setstate__( self : List[Any] ,SCREAMING_SNAKE_CASE__ : Optional[Any] ):
SCREAMING_SNAKE_CASE:Any = d
# for backward compatibility
if not hasattr(self ,"sp_model_kwargs" ):
SCREAMING_SNAKE_CASE:Tuple = {}
SCREAMING_SNAKE_CASE:Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __UpperCamelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : List[Any] ):
if self.remove_space:
SCREAMING_SNAKE_CASE:List[str] = " ".join(inputs.strip().split() )
else:
SCREAMING_SNAKE_CASE:Union[str, Any] = inputs
SCREAMING_SNAKE_CASE:Dict = outputs.replace("``" ,"\"" ).replace("\'\'" ,"\"" )
if not self.keep_accents:
SCREAMING_SNAKE_CASE:Optional[Any] = unicodedata.normalize("NFKD" ,_lowerCamelCase )
SCREAMING_SNAKE_CASE:Tuple = "".join([c for c in outputs if not unicodedata.combining(_lowerCamelCase )] )
if self.do_lower_case:
SCREAMING_SNAKE_CASE:Any = outputs.lower()
return outputs
    def _tokenize(self, text: str):
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces
    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
| 139 |
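# A quick standalone check of the special-token layout the tokenizer above
# produces: [CLS] A [SEP] for single sequences, [CLS] A [SEP] B [SEP] for pairs.
# The ids below are dummies chosen purely for illustration.
cls_id, sep_id = 2, 3
token_ids_a, token_ids_b = [10, 11], [20]
assert [cls_id] + token_ids_a + [sep_id] == [2, 10, 11, 3]
assert [cls_id] + token_ids_a + [sep_id] + token_ids_b + [sep_id] == [2, 10, 11, 3, 20, 3]
# token_type_ids follow the same split: 0s through the first [SEP], 1s afterwards
assert len([cls_id] + token_ids_a + [sep_id]) * [0] + len(token_ids_b + [sep_id]) * [1] == [0, 0, 0, 0, 1, 1]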
"""simple docstring"""
def solution(limit: int = 1_00_00_00) -> int:
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0 and a > d, also 4d > a
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count
if __name__ == "__main__":
print(F"{solution() = }")
| 288 | 0 |
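# Cross-check for the solver above (Project Euler 135): with x = a + d, y = a,
# z = a - d one gets x**2 - y**2 - z**2 = a * (4*d - a), the identity the loop
# inverts. A direct count over the divisors a of n:
def brute_force_count(n: int) -> int:
    count = 0
    for a in range(1, n + 1):
        if n % a:
            continue
        quotient = n // a + a  # equals 4*d when a solution exists
        if quotient % 4 == 0 and 0 < quotient // 4 < a:  # d integral, z = a - d > 0
            count += 1
    return count


assert brute_force_count(27) == 2     # 34**2 - 27**2 - 20**2 = 12**2 - 9**2 - 6**2 = 27
assert brute_force_count(1155) == 10  # least n with exactly ten solutions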
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'''configuration_mega''': ['''MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegaConfig''', '''MegaOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mega"] = [
'''MEGA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MegaForCausalLM''',
'''MegaForMaskedLM''',
'''MegaForMultipleChoice''',
'''MegaForQuestionAnswering''',
'''MegaForSequenceClassification''',
'''MegaForTokenClassification''',
'''MegaModel''',
'''MegaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
 | 353 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class TFGPT2Tokenizer(metaclass=DummyObject):
    _backends = ["keras_nlp"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["keras_nlp"])
| 312 | 0 |
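# A rough, simplified sketch of the DummyObject/requires_backends pattern used
# above: placeholder classes raise a clear ImportError naming the missing
# backend instead of failing opaquely. The names below are illustrative, not
# the library's internals.
import importlib.util


def _requires_backends(name, backends):
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        raise ImportError(f"{name} requires the following backends: {', '.join(missing)}")


class _DummyObject(type):
    def __call__(cls, *args, **kwargs):
        _requires_backends(cls.__name__, cls._backends)
        return super().__call__(*args, **kwargs)


class SomeKerasNLPModel(metaclass=_DummyObject):
    _backends = ["keras_nlp"]  # instantiating raises unless keras_nlp is installed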
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester:
snake_case__ : List[str] = XGLMConfig
snake_case__ : Optional[Any] = {}
snake_case__ : Optional[int] = """gelu"""
    def __init__(self, parent, batch_size=14, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, d_model=32, num_hidden_layers=2, num_attention_heads=4, ffn_dim=37, activation_function="gelu", activation_dropout=0.1, attention_dropout=0.1, max_position_embeddings=512, initializer_range=0.02, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1
    def get_large_model_config(self):
        return XGLMConfig.from_pretrained("""facebook/xglm-564M""")

    def prepare_config_and_inputs(self):
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3
        )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        config = self.get_config()
        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )
    def get_config(self):
        return XGLMConfig(
            vocab_size=self.vocab_size, d_model=self.hidden_size, num_layers=self.num_hidden_layers, attention_heads=self.num_attention_heads, ffn_dim=self.ffn_dim, activation_function=self.activation_function, activation_dropout=self.activation_dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, use_cache=True, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, return_dict=True, )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, head_mask) = config_and_inputs
        inputs_dict = {
            """input_ids""": input_ids,
            """head_mask""": head_mask,
        }
        return config, inputs_dict
@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
snake_case__ : int = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
snake_case__ : Dict = (TFXGLMForCausalLM,) if is_tf_available() else ()
snake_case__ : Optional[Any] = (
{"""feature-extraction""": TFXGLMModel, """text-generation""": TFXGLMForCausalLM} if is_tf_available() else {}
)
snake_case__ : List[str] = False
snake_case__ : Optional[int] = False
snake_case__ : List[str] = False
    def setUp(self):
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()
@slow
def _A ( self : int ):
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase :str = TFXGLMModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
@unittest.skip(reason="""Currently, model embeddings are going to undergo a major refactor.""" )
def _A ( self : Dict ):
super().test_resize_token_embeddings()
@require_tf
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def _A ( self : Optional[Any] , __lowerCamelCase : Optional[int]=True ):
UpperCamelCase :Dict = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" )
        input_ids = tf.convert_to_tensor([[2, 268, 9_865]], dtype=tf.int32)  # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
UpperCamelCase :int = [2, 268, 9_865, 67, 11, 1_988, 57_252, 9_865, 5, 984, 67, 1_988, 213_838, 1_658, 53, 70_446, 33, 6_657, 278, 1_581]
# fmt: on
UpperCamelCase :str = model.generate(__lowerCamelCase , do_sample=__lowerCamelCase , num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist() , __lowerCamelCase )
@slow
def _A ( self : Tuple ):
UpperCamelCase :Tuple = XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )
UpperCamelCase :Tuple = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" )
tf.random.set_seed(0 )
UpperCamelCase :Optional[int] = tokenizer("""Today is a nice day and""" , return_tensors="""tf""" )
UpperCamelCase :str = tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
with tf.device(""":/CPU:0""" ):
UpperCamelCase :Any = model.generate(__lowerCamelCase , do_sample=__lowerCamelCase , seed=[7, 0] )
UpperCamelCase :str = tokenizer.decode(output_ids[0] , skip_special_tokens=__lowerCamelCase )
UpperCamelCase :Union[str, Any] = (
"""Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"""
)
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
@slow
def _A ( self : int ):
UpperCamelCase :List[str] = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" )
UpperCamelCase :Tuple = XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )
UpperCamelCase :str = """left"""
# use different length sentences to test batching
UpperCamelCase :Union[str, Any] = [
"""This is an extremelly long sentence that only exists to test the ability of the model to cope with """
"""left-padding, such as in batched generation. The output for the sequence below should be the same """
"""regardless of whether left padding is applied or not. When""",
"""Hello, my dog is a little""",
]
UpperCamelCase :str = tokenizer(__lowerCamelCase , return_tensors="""tf""" , padding=__lowerCamelCase )
UpperCamelCase :Optional[int] = inputs["""input_ids"""]
UpperCamelCase :Dict = model.generate(input_ids=__lowerCamelCase , attention_mask=inputs["""attention_mask"""] , max_new_tokens=12 )
UpperCamelCase :List[str] = tokenizer(sentences[0] , return_tensors="""tf""" ).input_ids
UpperCamelCase :List[Any] = model.generate(input_ids=__lowerCamelCase , max_new_tokens=12 )
UpperCamelCase :str = tokenizer(sentences[1] , return_tensors="""tf""" ).input_ids
UpperCamelCase :str = model.generate(input_ids=__lowerCamelCase , max_new_tokens=12 )
UpperCamelCase :Any = tokenizer.batch_decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase )
UpperCamelCase :Union[str, Any] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=__lowerCamelCase )
UpperCamelCase :List[Any] = tokenizer.decode(output_padded[0] , skip_special_tokens=__lowerCamelCase )
UpperCamelCase :List[Any] = [
"""This is an extremelly long sentence that only exists to test the ability of the model to cope with """
"""left-padding, such as in batched generation. The output for the sequence below should be the same """
"""regardless of whether left padding is applied or not. When left padding is applied, the sequence will be """
"""a single""",
"""Hello, my dog is a little bit of a shy one, but he is very friendly""",
]
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
self.assertListEqual(__lowerCamelCase , [non_padded_sentence, padded_sentence] )
| 38 |
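# The batched-generation test above depends on left padding: decoder-only
# models predict the next token from the last position, so pads must sit on
# the left. A minimal sketch (requires transformers + TF; the model id is the
# one the test itself uses):
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("facebook/xglm-564M")
tokenizer.padding_side = "left"
batch = tokenizer(["A much longer first prompt than the second", "Hi"], padding=True, return_tensors="tf")
# pad positions are masked out, so both prompts end at the same rightmost index
print(batch["attention_mask"].numpy())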
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends


if is_bs4_available():
    import bs4
    from bs4 import BeautifulSoup
UpperCAmelCase_ : Any = logging.get_logger(__name__)
class MarkupLMFeatureExtractor(FeatureExtractionMixin):
    def __init__(self, **kwargs):
        requires_backends(self, ["bs4"])
        super().__init__(**kwargs)
    def xpath_soup(self, element):
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name, recursive=False)
            xpath_tags.append(child.name)
            xpath_subscripts.append(
                0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child)
            )
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts
    def get_three_from_single(self, html_string):
        html_code = BeautifulSoup(html_string, """html.parser""")
        all_doc_strings = []
        stringaxtag_seq = []
        stringaxsubs_seq = []
for element in html_code.descendants:
            if type(element) == bs4.element.NavigableString:
                if type(element.parent) != bs4.element.Tag:
continue
                text_in_this_tag = html.unescape(element).strip()
                if not text_in_this_tag:
                    continue
                all_doc_strings.append(text_in_this_tag)
                xpath_tags, xpath_subscripts = self.xpath_soup(element)
                stringaxtag_seq.append(xpath_tags)
                stringaxsubs_seq.append(xpath_subscripts)
        if len(all_doc_strings) != len(stringaxtag_seq):
            raise ValueError("""Number of doc strings and xtags does not correspond""")
        if len(all_doc_strings) != len(stringaxsubs_seq):
            raise ValueError("""Number of doc strings and xsubs does not correspond""")
        return all_doc_strings, stringaxtag_seq, stringaxsubs_seq
    def construct_xpath(self, xpath_tags, xpath_subscripts):
        xpath = """"""
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += f"""/{tagname}"""
            if subs != 0:
                xpath += f"""[{subs}]"""
        return xpath
    def __call__(self, html_strings) -> BatchFeature:
        valid_strings = False
        # Check that strings has a valid type
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True
        if not valid_strings:
            raise ValueError(
                """HTML strings must be of type `str`, `List[str]` (batch of examples), """
                f"""but is of type {type(html_strings)}.""")
        is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))
        if not is_batched:
            html_strings = [html_strings]
        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, stringaxtag_seq, stringaxsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, stringaxtag_seq, stringaxsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)
        # return as Dict
        data = {"""nodes""": nodes, """xpaths""": xpaths}
        encoded_inputs = BatchFeature(data=data, tensor_type=None)
        return encoded_inputs
| 38 | 1 |
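# Standalone usage demo of the xpath logic implemented by the feature extractor
# above, run directly against BeautifulSoup (bs4 assumed installed):
from bs4 import BeautifulSoup

def xpath_for(element) -> str:
    parts = []
    child = element if element.name else element.parent
    for parent in child.parents:
        siblings = parent.find_all(child.name, recursive=False)
        index = 0 if len(siblings) == 1 else next(i for i, s in enumerate(siblings, 1) if s is child)
        parts.append(f"/{child.name}" + (f"[{index}]" if index else ""))
        child = parent
    return "".join(reversed(parts))

soup = BeautifulSoup("<html><body><p>a</p><p>b</p></body></html>", "html.parser")
assert xpath_for(soup.find_all("p")[1]) == "/html/body/p[2]"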
"""simple docstring"""
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str) -> None:
'''simple docstring'''
if "xprophetnet" in prophetnet_checkpoint_path:
lowerCAmelCase_ :Tuple = XLMProphetNetForConditionalGenerationOld.from_pretrained(a_ )
lowerCAmelCase_ , lowerCAmelCase_ :Optional[int] = XLMProphetNetForConditionalGeneration.from_pretrained(
a_ , output_loading_info=a_ )
else:
lowerCAmelCase_ :Optional[int] = ProphetNetForConditionalGenerationOld.from_pretrained(a_ )
lowerCAmelCase_ , lowerCAmelCase_ :Tuple = ProphetNetForConditionalGeneration.from_pretrained(
a_ , output_loading_info=a_ )
lowerCAmelCase_ :Optional[int] = ["""key_proj""", """value_proj""", """query_proj"""]
lowerCAmelCase_ :Tuple = {
"""self_attn""": """ngram_self_attn""",
"""cross_attn""": """encoder_attn""",
"""cross_attn_layer_norm""": """encoder_attn_layer_norm""",
"""feed_forward_layer_norm""": """final_layer_norm""",
"""feed_forward""": """""",
"""intermediate""": """fc1""",
"""output""": """fc2""",
"""key_proj""": """k_proj""",
"""query_proj""": """q_proj""",
"""value_proj""": """v_proj""",
"""word_embeddings""": """embed_tokens""",
"""embeddings_layer_norm""": """emb_layer_norm""",
"""relative_pos_embeddings""": """relative_linear""",
"""ngram_embeddings""": """ngram_input_embed""",
"""position_embeddings""": """embed_positions""",
}
for key in loading_info["missing_keys"]:
lowerCAmelCase_ :Optional[Any] = key.split(""".""" )
if attributes[0] == "lm_head":
lowerCAmelCase_ :List[Any] = prophet
lowerCAmelCase_ :Any = prophet_old
else:
lowerCAmelCase_ :str = prophet.prophetnet
lowerCAmelCase_ :Optional[Any] = prophet_old.model
lowerCAmelCase_ :str = False
for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
                if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                    old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
lowerCAmelCase_ :Dict = old_model.weight
logger.info(f"""{attribute} is initialized.""" )
lowerCAmelCase_ :Union[str, Any] = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
lowerCAmelCase_ :str = old_model.bias
logger.info(f"""{attribute} is initialized""" )
lowerCAmelCase_ :Union[str, Any] = True
break
            elif attribute in special_keys and hasattr(old_model, """in_proj_weight"""):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 5_1_2, "We want 512 position_embeddings."
lowerCAmelCase_ :Dict = nn.Parameter(old_model.embed_positions.weight[:5_1_2, :] )
lowerCAmelCase_ :List[Any] = True
break
            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(attribute)]
            else:
                model = getattr(model, attribute)
                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"""{old_model} does not have {old_attribute}""")
                    old_model = getattr(old_model, old_attribute)
        if not is_key_init:
            raise ValueError(f"""{key} was not correctly initialized!""")
    print(f"""Saving model to {pytorch_dump_folder_path}""")
    prophet.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 363 |
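# The special-key handling above splits a fused attention projection
# (in_proj_weight, stacked as [query; key; value]) into separate q/k/v
# matrices. A standalone sketch of that slicing (shapes are illustrative):
import torch

embed_dim = 8
in_proj_weight = torch.randn(3 * embed_dim, embed_dim)  # fused [q; k; v]
q_w = in_proj_weight[:embed_dim, :]
k_w = in_proj_weight[embed_dim : 2 * embed_dim, :]
v_w = in_proj_weight[2 * embed_dim :, :]
assert q_w.shape == k_w.shape == v_w.shape == (embed_dim, embed_dim)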
"""simple docstring"""
import itertools
import math
def is_prime(number: int) -> bool:
    '''simple docstring'''
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All prime numbers are in the format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    '''simple docstring'''
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 1_0_0_0_1) -> int:
    '''simple docstring'''
    return next(itertools.islice(prime_generator(), nth - 1, nth))
if __name__ == "__main__":
print(F"""{solution() = }""")
| 1 | 0 |
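# Why the trial division above only tests 6k +/- 1: every prime p > 3 has
# p % 6 in {1, 5}, because the other residues are divisible by 2 or 3.
# Quick empirical check against naive trial division:
def check_6k_residues(limit: int = 10_000) -> bool:
    for p in range(5, limit):
        if all(p % q for q in range(2, int(p**0.5) + 1)) and p % 6 not in (1, 5):
            return False
    return True

assert check_6k_residues()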
'''simple docstring'''
def solution(max_perimeter: int = 10**9) -> int:
    """simple docstring"""
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum
if __name__ == "__main__":
print(f'{solution() = }')
| 53 |
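# The loop above walks a Pell-style recurrence rather than brute force. A direct
# check for small perimeters (triangles with sides a, a, a±1 and integral area,
# using 16 * area**2 = (2a + b) * b * b * (2a - b) from Heron's formula):
import math


def small_perimeters(limit: int = 1_000) -> list:
    found = []
    for a in range(2, limit):
        for b in (a - 1, a + 1):
            perimeter = 2 * a + b
            if perimeter > limit:
                continue
            sixteen_area_sq = (2 * a + b) * b * b * (2 * a - b)
            root = math.isqrt(sixteen_area_sq)
            if root * root == sixteen_area_sq and root % 4 == 0:  # area = root / 4 is integral
                found.append(perimeter)
    return sorted(found)


assert small_perimeters() == [16, 50, 196, 722]  # their sum, 984, is solution(1_000)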
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
MOE_LAYER_NAME_MAPPING = {
"/attention/": "/0/SelfAttention/",
"/self_attention/": "/0/SelfAttention/",
"/encoder_decoder_attention/": "/1/EncDecAttention/",
"value": "v",
"query": "q",
"key": "k",
"out": "o",
"pre_self_attention_layer_norm": "0/layer_norm",
"pre_cross_attention_layer_norm": "1/layer_norm",
"pre_attention_layer_norm": "0/layer_norm", # previously 1, but seems wrong
"token_embedder": "shared",
"encoder_norm": "final_layer_norm",
"decoder_norm": "final_layer_norm",
"relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight",
"router/router_weights/w/": "router/classifier/",
"roer/roer_weights/w/": "router/classifier/",
"logits_dense": "lm_head",
}
def rename_keys(s_dict):
    '''simple docstring'''
    keys = list(s_dict.keys())
    for key in keys:
        layer_to_block_of_layer = r".*/layers_(\d+)"
        new_key = key
        if re.match(layer_to_block_of_layer, key):
            new_key = re.sub(r"layers_(\d+)", r"block/\1/layer", key)
        layer_to_block_of_layer = r"(encoder|decoder)\/"
        if re.match(layer_to_block_of_layer, key):
            groups = re.match(layer_to_block_of_layer, new_key).groups()
            if groups[0] == "encoder":
                new_key = re.sub(r"/mlp/", r"/1/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/1/layer_norm/", new_key)
            elif groups[0] == "decoder":
                new_key = re.sub(r"/mlp/", r"/2/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/2/layer_norm/", new_key)
        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key, temp_key)
        print(f"""{key} -> {new_key}""")
        s_dict[new_key] = s_dict.pop(key)

    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            '''encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'''
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            '''decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'''
        ].T

    # 3. Take extra care of the EXPERTS layer
    for key in list(s_dict.keys()):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weihts = s_dict[key]
            for idx in range(num_experts):
                s_dict[key.replace("expert/", f"experts/expert_{idx}/")] = expert_weihts[idx]
                print(f"{key} -> {key.replace('expert/', f'experts/expert_{idx}/')}")
            s_dict.pop(key)
    return s_dict
GIN_TO_CONFIG_MAPPING = {
"NUM_ENCODER_LAYERS": "num_layers",
"NUM_DECODER_LAYERS": "num_decoder_layers",
"NUM_HEADS": "num_heads",
"HEAD_DIM": "d_kv",
"EMBED_DIM": "d_model",
"MLP_DIM": "d_ff",
"NUM_SELECTED_EXPERTS": "num_selected_experts",
"NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers",
"NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers",
"dense.MlpBlock.activations": "feed_forward_proj",
}
def convert_gin_to_config(gin_file, num_experts):
    '''simple docstring'''
    import regex as re

    with open(gin_file, '''r''') as f:
        raw_gin = f.read()
    regex_match = re.findall(r'''(.*) = ([0-9.]*)''', raw_gin)
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if '''.''' in value else int(value)
    activation = re.findall(r'''(.*activations) = \(\'(.*)\',\)''', raw_gin)[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])
    args["num_experts"] = num_experts
    config = SwitchTransformersConfig(**args)
    return config
def convert_flax_checkpoint_to_pytorch(flax_checkpoint_path, config_name, gin_file=None, pytorch_dump_path="./", num_experts=8):
    '''simple docstring'''
    print(f"""Loading flax weights from : {flax_checkpoint_path}""")
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path)
    if gin_file is not None:
        config = convert_gin_to_config(gin_file, num_experts)
    else:
        config = SwitchTransformersConfig.from_pretrained(config_name)
    pt_model = SwitchTransformersForConditionalGeneration(config)
    flax_params = flax_params['''target''']
    flax_params = flatten_dict(flax_params, sep='''/''')
    flax_params = rename_keys(flax_params)
    flax_params = unflatten_dict(flax_params, sep='''/''')
    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model, flax_params)
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    pt_model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"
" model architecture. If not provided, a `gin_file` has to be provided."
),
)
parser.add_argument(
"--gin_file",
default=None,
type=str,
required=False,
help="Path to the gin config file. If not provided, a `config_file` has to be passed ",
)
parser.add_argument(
"--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model."
)
parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts")
    args = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
| 273 | 0 |
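# A compact standalone illustration of the regex-driven layer-index rewrite
# performed by rename_keys above, applied to a T5X-style parameter name:
import re

def rename_layer_key(key: str) -> str:
    if re.match(r".*/layers_(\d+)", key):
        key = re.sub(r"layers_(\d+)", r"block/\1/layer", key)
    section_match = re.match(r"(encoder|decoder)/", key)
    if section_match:
        mlp_index = "1" if section_match.group(1) == "encoder" else "2"
        key = re.sub(r"/mlp/", f"/{mlp_index}/mlp/", key)
    return key

assert rename_layer_key("encoder/layers_0/mlp/wi") == "encoder/block/0/layer/1/mlp/wi"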
import re
def indian_phone_validator(phone: str) -> bool:
    pat = re.compile(r'^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$')
    if match := re.search(pat, phone):
        return match.string == phone
    return False
return False
if __name__ == "__main__":
print(indian_phone_validator('''+918827897895'''))
| 335 |
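# A few quick checks for the validator above: the pattern accepts an optional
# "+91" (optionally followed by a dash or space), an optional leading 0 or 91,
# then a 10-digit number starting with 7, 8 or 9.
assert indian_phone_validator("+918827897895")
assert indian_phone_validator("9876543210")
assert not indian_phone_validator("1234567890")  # must start with 7, 8 or 9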
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
'''simple docstring'''
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = 'gelu'
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
    def prepare_config_and_inputs(self):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = ConvBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, return_dict=True, )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        model = TFConvBertModel(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        inputs_list = [input_ids, input_mask]
        result = model(inputs)
        result = model(inputs_list)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            'input_ids': multiple_choice_inputs_ids,
            'attention_mask': multiple_choice_input_mask,
            'token_type_ids': multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
__SCREAMING_SNAKE_CASE = (
{
"""feature-extraction""": TFConvBertModel,
"""fill-mask""": TFConvBertForMaskedLM,
"""question-answering""": TFConvBertForQuestionAnswering,
"""text-classification""": TFConvBertForSequenceClassification,
"""token-classification""": TFConvBertForTokenClassification,
"""zero-shot""": TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
    def setUp(self):
        """simple docstring"""
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)
    def test_config(self):
        """simple docstring"""
        self.config_tester.run_common_tests()

    def test_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
@slow
def UpperCamelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ = True
UpperCAmelCase__ = True
if hasattr(__a , 'use_cache' ):
UpperCAmelCase__ = True
UpperCAmelCase__ = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
UpperCAmelCase__ = getattr(self.model_tester , 'key_length' , __a )
for model_class in self.all_model_classes:
UpperCAmelCase__ = self._prepare_for_class(__a , __a )
UpperCAmelCase__ = model_class(__a )
UpperCAmelCase__ = len(model(__a ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__a , saved_model=__a )
UpperCAmelCase__ = os.path.join(__a , 'saved_model' , '1' )
UpperCAmelCase__ = tf.keras.models.load_model(__a )
UpperCAmelCase__ = model(__a )
if self.is_encoder_decoder:
UpperCAmelCase__ = outputs['encoder_hidden_states']
UpperCAmelCase__ = outputs['encoder_attentions']
else:
UpperCAmelCase__ = outputs['hidden_states']
UpperCAmelCase__ = outputs['attentions']
self.assertEqual(len(__a ) , __a )
UpperCAmelCase__ = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(__a ) , __a )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def UpperCamelCase__ (self ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
self.assertIsNotNone(__a )
def UpperCamelCase__ (self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ = True
UpperCAmelCase__ = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length )
UpperCAmelCase__ = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
UpperCAmelCase__ = getattr(self.model_tester , 'key_length' , __a )
UpperCAmelCase__ = getattr(self.model_tester , 'key_length' , __a )
def check_decoder_attentions_output(__a ):
UpperCAmelCase__ = len(__a )
self.assertEqual(out_len % 2 , 0 )
UpperCAmelCase__ = outputs.decoder_attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(__a ):
UpperCAmelCase__ = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
UpperCAmelCase__ = True
UpperCAmelCase__ = False
UpperCAmelCase__ = model_class(__a )
UpperCAmelCase__ = model(self._prepare_for_class(__a , __a ) )
UpperCAmelCase__ = len(__a )
self.assertEqual(config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
if self.is_encoder_decoder:
UpperCAmelCase__ = model_class(__a )
UpperCAmelCase__ = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(config.output_hidden_states , __a )
check_decoder_attentions_output(__a )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
UpperCAmelCase__ = True
UpperCAmelCase__ = model_class(__a )
UpperCAmelCase__ = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
# Check attention is always last and order is fine
UpperCAmelCase__ = True
UpperCAmelCase__ = True
UpperCAmelCase__ = model_class(__a )
UpperCAmelCase__ = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__a ) )
self.assertEqual(model.config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
@require_tf
class lowercase ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase__ (self ) -> int:
"""simple docstring"""
UpperCAmelCase__ = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
UpperCAmelCase__ = tf.constant([[0, 1, 2, 3, 4, 5]] )
UpperCAmelCase__ = model(__a )[0]
UpperCAmelCase__ = [1, 6, 768]
self.assertEqual(output.shape , __a )
UpperCAmelCase__ = tf.constant(
[
[
[-0.03_47_54_93, -0.4_68_60_34, -0.30_63_88_32],
[0.22_63_72_48, -0.26_98_86_46, -0.7_42_34_24],
[0.10_32_48_68, -0.45_01_35_08, -0.58_28_07_84],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __a , atol=1E-4 )
| 335 | 1 |
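# Note on the "num_attention_heads / 2" in the attention-shape checks above:
# ConvBERT splits its heads between self-attention and span-based convolution
# via head_ratio (default 2), so only half the configured heads appear in the
# attention tensors. Minimal sketch of the relationship:
from transformers import ConvBertConfig

config = ConvBertConfig(num_attention_heads=4, head_ratio=2)
assert config.num_attention_heads // config.head_ratio == 2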
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class lowercase_ (unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def SCREAMING_SNAKE_CASE ( self : Any ):
torch.manual_seed(0 )
__lowercase = UNetaDModel(
sample_size=(3_2, 6_4) ,in_channels=1 ,out_channels=1 ,layers_per_block=2 ,block_out_channels=(1_2_8, 1_2_8) ,down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') ,up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') ,)
return model
@property
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
torch.manual_seed(0 )
__lowercase = UNetaDConditionModel(
sample_size=(6_4, 3_2) ,in_channels=1 ,out_channels=1 ,layers_per_block=2 ,block_out_channels=(1_2_8, 1_2_8) ,down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') ,up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') ,cross_attention_dim=1_0 ,)
return model
@property
def SCREAMING_SNAKE_CASE ( self : int ):
torch.manual_seed(0 )
__lowercase = AutoencoderKL(
sample_size=(1_2_8, 6_4) ,in_channels=1 ,out_channels=1 ,latent_channels=1 ,layers_per_block=2 ,block_out_channels=(1_2_8, 1_2_8) ,down_block_types=('''DownEncoderBlock2D''', '''DownEncoderBlock2D''') ,up_block_types=('''UpDecoderBlock2D''', '''UpDecoderBlock2D''') ,)
__lowercase = UNetaDModel(
sample_size=(6_4, 3_2) ,in_channels=1 ,out_channels=1 ,layers_per_block=2 ,block_out_channels=(1_2_8, 1_2_8) ,down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') ,up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') ,)
return vqvae, unet
@slow
def SCREAMING_SNAKE_CASE ( self : List[str] ):
__lowercase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__lowercase = Mel(
x_res=self.dummy_unet.config.sample_size[1] ,y_res=self.dummy_unet.config.sample_size[0] ,)
__lowercase = DDPMScheduler()
__lowercase = AudioDiffusionPipeline(vqvae=lowercase__ ,unet=self.dummy_unet ,mel=lowercase__ ,scheduler=lowercase__ )
__lowercase = pipe.to(lowercase__ )
pipe.set_progress_bar_config(disable=lowercase__ )
__lowercase = torch.Generator(device=lowercase__ ).manual_seed(4_2 )
__lowercase = pipe(generator=lowercase__ ,steps=4 )
__lowercase = output.audios[0]
__lowercase = output.images[0]
__lowercase = torch.Generator(device=lowercase__ ).manual_seed(4_2 )
__lowercase = pipe(generator=lowercase__ ,steps=4 ,return_dict=lowercase__ )
__lowercase = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
__lowercase = np.frombuffer(image.tobytes() ,dtype='''uint8''' )[:1_0]
__lowercase = np.frombuffer(image_from_tuple.tobytes() ,dtype='''uint8''' )[:1_0]
__lowercase = np.array([6_9, 2_5_5, 2_5_5, 2_5_5, 0, 0, 7_7, 1_8_1, 1_2, 1_2_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
__lowercase = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] ,y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] ,)
__lowercase = DDIMScheduler()
__lowercase = self.dummy_vqvae_and_unet
__lowercase = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] ,unet=dummy_vqvae_and_unet[1] ,mel=lowercase__ ,scheduler=lowercase__ )
__lowercase = pipe.to(lowercase__ )
pipe.set_progress_bar_config(disable=lowercase__ )
np.random.seed(0 )
__lowercase = np.random.uniform(-1 ,1 ,((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
__lowercase = torch.Generator(device=lowercase__ ).manual_seed(4_2 )
__lowercase = pipe(raw_audio=lowercase__ ,generator=lowercase__ ,start_step=5 ,steps=1_0 )
__lowercase = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
__lowercase = np.frombuffer(image.tobytes() ,dtype='''uint8''' )[:1_0]
__lowercase = np.array([1_2_0, 1_1_7, 1_1_0, 1_0_9, 1_3_8, 1_6_7, 1_3_8, 1_4_8, 1_3_2, 1_2_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
__lowercase = self.dummy_unet_condition
__lowercase = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] ,unet=lowercase__ ,mel=lowercase__ ,scheduler=lowercase__ )
__lowercase = pipe.to(lowercase__ )
pipe.set_progress_bar_config(disable=lowercase__ )
np.random.seed(0 )
__lowercase = torch.rand((1, 1, 1_0) )
__lowercase = pipe(generator=lowercase__ ,encoding=lowercase__ )
__lowercase = output.images[0]
__lowercase = np.frombuffer(image.tobytes() ,dtype='''uint8''' )[:1_0]
__lowercase = np.array([1_0_7, 1_0_3, 1_2_0, 1_2_7, 1_4_2, 1_2_2, 1_1_3, 1_2_2, 9_7, 1_1_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class lowercase_ (unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self : Dict ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE ( self : List[str] ):
__lowercase = torch_device
__lowercase = DiffusionPipeline.from_pretrained('''teticio/audio-diffusion-ddim-256''' )
__lowercase = pipe.to(lowercase__ )
pipe.set_progress_bar_config(disable=lowercase__ )
__lowercase = torch.Generator(device=lowercase__ ).manual_seed(4_2 )
__lowercase = pipe(generator=lowercase__ )
__lowercase = output.audios[0]
__lowercase = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
__lowercase = np.frombuffer(image.tobytes() ,dtype='''uint8''' )[:1_0]
__lowercase = np.array([1_5_1, 1_6_7, 1_5_4, 1_4_4, 1_2_2, 1_3_4, 1_2_1, 1_0_5, 7_0, 2_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
| 104 |
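# The audio-length assertions above follow from the mel spectrogram geometry:
# an image of width W decodes to (W - 1) * hop_length audio samples. The
# hop_length value below is diffusers' documented Mel default, assumed here
# purely for illustration:
sample_width = 64  # width of the dummy UNet's sample_size above
hop_length = 512
assert (sample_width - 1) * hop_length == 32_256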
"""simple docstring"""
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_results(output_dir: str) -> Dict:
    """simple docstring"""
    results = {}
    path = os.path.join(output_dir, 'all_results.json')
    if os.path.exists(path):
        with open(path, 'r') as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self : Any ):
import xla_spawn
lowerCAmelCase_ : Union[str, Any] = self.get_auto_remove_tmp_dir()
lowerCAmelCase_ : Optional[Any] = F"\n ./examples/pytorch/text-classification/run_glue.py\n --num_cores=8\n ./examples/pytorch/text-classification/run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --do_train\n --do_eval\n --debug tpu_metrics_debug\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --max_steps=10\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n ".split()
with patch.object(SCREAMING_SNAKE_CASE_ , 'argv' , SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase_ : Optional[Any] = time()
xla_spawn.main()
lowerCAmelCase_ : Any = time()
lowerCAmelCase_ : Dict = get_results(SCREAMING_SNAKE_CASE_ )
self.assertGreaterEqual(result['eval_accuracy'] , 0.75 )
# Assert that the script takes less than 500 seconds to make sure it doesn't hang.
self.assertLess(end - start , 5_0_0 )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
import xla_spawn
lowerCAmelCase_ : Dict = '\n ./tests/test_trainer_tpu.py\n --num_cores=8\n ./tests/test_trainer_tpu.py\n '.split()
with patch.object(SCREAMING_SNAKE_CASE_ , 'argv' , SCREAMING_SNAKE_CASE_ ):
xla_spawn.main()
| 224 | 0 |
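# The get_results helper above reads the trainer's all_results.json; a quick
# illustration of producing and consuming that file shape (paths are temporary
# and hypothetical):
import json, os, tempfile

with tempfile.TemporaryDirectory() as tmp:
    with open(os.path.join(tmp, "all_results.json"), "w") as f:
        json.dump({"eval_accuracy": 0.82}, f)
    assert get_results(tmp)["eval_accuracy"] >= 0.75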
def A(input_str: str) -> bool:
    '''simple docstring'''
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
| 360 |
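# The bitmap trick above maps each character's code point to one bit of an
# arbitrarily large int. For comparison, the same check with a set (equivalent
# semantics, simpler but allocates per character):
def all_chars_unique(s: str) -> bool:
    seen = set()
    for ch in s:
        if ch in seen:
            return False
        seen.add(ch)
    return True

assert all_chars_unique("abc") and not all_chars_unique("aba")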
_UpperCAmelCase : str = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
_UpperCAmelCase : Any = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
_UpperCAmelCase : List[Any] = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
_UpperCAmelCase : Tuple = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
_UpperCAmelCase : Union[str, Any] = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
_UpperCAmelCase : List[str] = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
_UpperCAmelCase : Tuple = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
_UpperCAmelCase : Dict = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
| 110 | 0 |
"""simple docstring"""
# XXX: we want transformers master here - in the absence of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path

git_repo_path = Path(__file__).resolve().parents[3] / "src"
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
models = {"base": "patrickvonplaten/wav2vec2_tiny_random", "robust": "patrickvonplaten/wav2vec2_tiny_random_robust"}

ZERO2 = "zero2"
ZERO3 = "zero3"

stages = [ZERO2, ZERO3]


def custom_name_func(func, param_num, param):
    # customize the test name generator function as we want both params to appear in the sub-test name
    param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args))
    return f"{func.__name__}_{param_based_name}"


# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedWav2Vec2(TestCasePlus):
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_non_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=False)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=False)

    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_non_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=True)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=True)
    def do_checks(self, output_dir):
        # XXX: run_asr is premature and doesn't save any results
        # so all we check for now is that the process didn't fail
        pass

    def run_and_check(self, stage: str, model: str, eval_steps: int = 10, distributed: bool = True, fp16: bool = True, quality_checks: bool = True):
        model_name = models[model]
        output_dir = self.run_trainer(
            stage=stage, model_name=model_name, eval_steps=eval_steps, num_train_epochs=1, distributed=distributed, fp16=fp16,
        )
        self.do_checks(output_dir)
        return output_dir

    def run_trainer(self, stage: str, model_name: str, eval_steps: int = 10, num_train_epochs: int = 1, distributed: bool = True, fp16: bool = True):
        output_dir = self.get_auto_remove_tmp_dir("./xxx", after=False)
        args = f"""
            --model_name_or_path {model_name}
            --dataset_name hf-internal-testing/librispeech_asr_dummy
            --dataset_config_name clean
            --train_split_name validation
            --validation_split_name validation
            --output_dir {output_dir}
            --num_train_epochs {str(num_train_epochs)}
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 2
            --evaluation_strategy steps
            --learning_rate 5e-4
            --warmup_steps 8
            --orthography timit
            --preprocessing_num_workers 1
            --group_by_length
            --freeze_feature_extractor
            --report_to none
            --save_steps 0
            --eval_steps {eval_steps}
            --report_to none
        """.split()
        if fp16:
            args.extend(["--fp16"])

        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json".split()
        script = [f"{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"]
        launcher = self.get_launcher(distributed)

        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] + cmd)); die
        execute_subprocess_async(cmd, env=self.get_env())

        return output_dir
    def get_launcher(self, distributed=False):
        # 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup
        # - it won't be able to handle that
        # 2. for now testing with just 2 gpus max (since some quality tests may give different
        # results with more gpus because we use very little data)
        num_gpus = min(2, get_gpu_count()) if distributed else 1
        return f"deepspeed --num_nodes 1 --num_gpus {num_gpus}".split()
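
# Added sketch (hypothetical paths and arguments) of the final command the test
# hands to execute_subprocess_async, to make the launcher + script + args + ds_args
# composition above concrete:
_launcher = "deepspeed --num_nodes 1 --num_gpus 2".split()
_script = ["examples/research_projects/wav2vec2/run_asr.py"]
_args = "--model_name_or_path patrickvonplaten/wav2vec2_tiny_random --output_dir /tmp/xxx --fp16".split()
_ds_args = "--deepspeed ds_config_wav2vec2_zero2.json".split()
print(" ".join(_launcher + _script + _args + _ds_args))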
| 241 |
"""simple docstring"""
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    """Compute pi to the given precision using the Chudnovsky algorithm."""
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")
    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]
if __name__ == "__main__":
    n = 50
    print(f"The first {n} digits of pi is: {pi(n)}")
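    # Known-prefix check (an added illustration): with precision 10 the last
    # computed digit is dropped to avoid rounding artifacts, leaving the first
    # nine decimals of pi.
    assert pi(10) == "3.14159265"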
| 241 | 1 |
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100000)]


def next_number(number: int) -> int:
    sum_of_digits_squared = 0
    while number:
        # Increased speed slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100000]
        number //= 100000
    return sum_of_digits_squared


# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS = [None] * 10000000
CHAINS[0] = True  # the chain that starts at 1
CHAINS[57] = False  # the chain that starts at 58 and ends in 89


def chain(number: int) -> bool:
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore
    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain
    while number < 10000000:
        CHAINS[number - 1] = number_chain
        number *= 10
    return number_chain


def solution(number: int = 10000000) -> int:
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)
    return CHAINS[:number].count(False)
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{solution() = }")
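    # Spot-check of the classic chain 44 -> 32 -> 13 -> 10 -> 1 (added asserts,
    # not part of the original solution):
    assert next_number(44) == 32
    assert next_number(32) == 13
    assert chain(44) is True  # the chain starting at 44 arrives at 1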
| 354 |
def match_pattern(input_string: str, pattern: str) -> bool:
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1

    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]

    # since string of zero length match pattern of zero length
    dp[0][0] = 1

    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0

    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0

    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0

    return bool(dp[-1][-1])
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # inputting the strings
    # input_string = input("input a string :")
    # pattern = input("input a pattern :")
    input_string = "aab"
    pattern = "c*a*b"

    # using function to check whether given string matches the given pattern
    if match_pattern(input_string, pattern):
        print(f"{input_string} matches the given pattern {pattern}")
    else:
        print(f"{input_string} does not match with the given pattern {pattern}")
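    # A few extra illustrative checks (added, not in the original module):
    assert match_pattern("aab", "c*a*b") is True  # 'c*' matches empty, 'a*' matches 'aa'
    assert match_pattern("aaab", "aa*b") is True
    assert match_pattern("aaa", "aa") is False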
| 189 | 0 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DPMSolverSinglestepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSinglestepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "prediction_type": "epsilon",
            "thresholding": False,
            "sample_max_value": 1.0,
            "algorithm_type": "dpmsolver++",
            "solver_type": "midpoint",
            "lambda_min_clipped": -float("inf"),
            "variance_type": None,
        }
        config.update(**kwargs)
        return config
def SCREAMING_SNAKE_CASE__ ( self , a=0 , **a) -> Tuple:
SCREAMING_SNAKE_CASE = dict(self.forward_default_kwargs)
SCREAMING_SNAKE_CASE = kwargs.pop('num_inference_steps' , a)
SCREAMING_SNAKE_CASE = self.dummy_sample
SCREAMING_SNAKE_CASE = 0.1 * sample
SCREAMING_SNAKE_CASE = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE = self.get_scheduler_config(**a)
SCREAMING_SNAKE_CASE = scheduler_class(**a)
scheduler.set_timesteps(a)
# copy over dummy past residuals
SCREAMING_SNAKE_CASE = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a)
SCREAMING_SNAKE_CASE = scheduler_class.from_pretrained(a)
new_scheduler.set_timesteps(a)
# copy over dummy past residuals
SCREAMING_SNAKE_CASE = dummy_past_residuals[: new_scheduler.config.solver_order]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = sample, sample
for t in range(a , time_step + scheduler.config.solver_order + 1):
SCREAMING_SNAKE_CASE = scheduler.step(a , a , a , **a).prev_sample
SCREAMING_SNAKE_CASE = new_scheduler.step(a , a , a , **a).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def SCREAMING_SNAKE_CASE__ ( self) -> int:
pass
def SCREAMING_SNAKE_CASE__ ( self , a=0 , **a) -> Tuple:
SCREAMING_SNAKE_CASE = dict(self.forward_default_kwargs)
SCREAMING_SNAKE_CASE = kwargs.pop('num_inference_steps' , a)
SCREAMING_SNAKE_CASE = self.dummy_sample
SCREAMING_SNAKE_CASE = 0.1 * sample
SCREAMING_SNAKE_CASE = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE = self.get_scheduler_config()
SCREAMING_SNAKE_CASE = scheduler_class(**a)
scheduler.set_timesteps(a)
# copy over dummy past residuals (must be after setting timesteps)
SCREAMING_SNAKE_CASE = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a)
SCREAMING_SNAKE_CASE = scheduler_class.from_pretrained(a)
# copy over dummy past residuals
new_scheduler.set_timesteps(a)
# copy over dummy past residual (must be after setting timesteps)
SCREAMING_SNAKE_CASE = dummy_past_residuals[: new_scheduler.config.solver_order]
SCREAMING_SNAKE_CASE = scheduler.step(a , a , a , **a).prev_sample
SCREAMING_SNAKE_CASE = new_scheduler.step(a , a , a , **a).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def SCREAMING_SNAKE_CASE__ ( self , a=None , **a) -> Any:
if scheduler is None:
SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE = self.get_scheduler_config(**a)
SCREAMING_SNAKE_CASE = scheduler_class(**a)
SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE = self.get_scheduler_config(**a)
SCREAMING_SNAKE_CASE = scheduler_class(**a)
SCREAMING_SNAKE_CASE = 10
SCREAMING_SNAKE_CASE = self.dummy_model()
SCREAMING_SNAKE_CASE = self.dummy_sample_deter
scheduler.set_timesteps(a)
for i, t in enumerate(scheduler.timesteps):
SCREAMING_SNAKE_CASE = model(a , a)
SCREAMING_SNAKE_CASE = scheduler.step(a , a , a).prev_sample
return sample
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]:
SCREAMING_SNAKE_CASE = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
SCREAMING_SNAKE_CASE = 50
SCREAMING_SNAKE_CASE = self.dummy_model()
SCREAMING_SNAKE_CASE = self.dummy_sample_deter
scheduler.set_timesteps(a)
# make sure that the first t is uneven
for i, t in enumerate(scheduler.timesteps[3:]):
SCREAMING_SNAKE_CASE = model(a , a)
SCREAMING_SNAKE_CASE = scheduler.step(a , a , a).prev_sample
SCREAMING_SNAKE_CASE = torch.mean(torch.abs(a))
assert abs(result_mean.item() - 0.25_74) < 1E-3
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]:
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=a)
def SCREAMING_SNAKE_CASE__ ( self) -> Tuple:
# make sure that iterating over schedulers with same config names gives same results
# for defaults
SCREAMING_SNAKE_CASE = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
SCREAMING_SNAKE_CASE = self.full_loop(scheduler=a)
SCREAMING_SNAKE_CASE = torch.mean(torch.abs(a))
assert abs(result_mean.item() - 0.27_91) < 1E-3
SCREAMING_SNAKE_CASE = DEISMultistepScheduler.from_config(scheduler.config)
SCREAMING_SNAKE_CASE = DPMSolverMultistepScheduler.from_config(scheduler.config)
SCREAMING_SNAKE_CASE = UniPCMultistepScheduler.from_config(scheduler.config)
SCREAMING_SNAKE_CASE = DPMSolverSinglestepScheduler.from_config(scheduler.config)
SCREAMING_SNAKE_CASE = self.full_loop(scheduler=a)
SCREAMING_SNAKE_CASE = torch.mean(torch.abs(a))
assert abs(result_mean.item() - 0.27_91) < 1E-3
def SCREAMING_SNAKE_CASE__ ( self) -> List[str]:
self.check_over_configs(thresholding=a)
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=a , prediction_type=a , sample_max_value=a , algorithm_type='dpmsolver++' , solver_order=a , solver_type=a , )
def SCREAMING_SNAKE_CASE__ ( self) -> str:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=a)
def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]:
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=a , solver_type=a , prediction_type=a , algorithm_type=a , )
SCREAMING_SNAKE_CASE = self.full_loop(
solver_order=a , solver_type=a , prediction_type=a , algorithm_type=a , )
assert not torch.isnan(a).any(), "Samples have nan numbers"
def SCREAMING_SNAKE_CASE__ ( self) -> str:
self.check_over_configs(lower_order_final=a)
self.check_over_configs(lower_order_final=a)
def SCREAMING_SNAKE_CASE__ ( self) -> int:
self.check_over_configs(lambda_min_clipped=-float('inf'))
self.check_over_configs(lambda_min_clipped=-5.1)
def SCREAMING_SNAKE_CASE__ ( self) -> List[str]:
self.check_over_configs(variance_type=a)
self.check_over_configs(variance_type='learned_range')
def SCREAMING_SNAKE_CASE__ ( self) -> List[str]:
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=a , time_step=0)
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
SCREAMING_SNAKE_CASE = self.full_loop()
SCREAMING_SNAKE_CASE = torch.mean(torch.abs(a))
assert abs(result_mean.item() - 0.27_91) < 1E-3
def SCREAMING_SNAKE_CASE__ ( self) -> str:
SCREAMING_SNAKE_CASE = self.full_loop(use_karras_sigmas=a)
SCREAMING_SNAKE_CASE = torch.mean(torch.abs(a))
assert abs(result_mean.item() - 0.22_48) < 1E-3
def SCREAMING_SNAKE_CASE__ ( self) -> Dict:
SCREAMING_SNAKE_CASE = self.full_loop(prediction_type='v_prediction')
SCREAMING_SNAKE_CASE = torch.mean(torch.abs(a))
assert abs(result_mean.item() - 0.14_53) < 1E-3
def SCREAMING_SNAKE_CASE__ ( self) -> int:
SCREAMING_SNAKE_CASE = self.full_loop(prediction_type='v_prediction' , use_karras_sigmas=a)
SCREAMING_SNAKE_CASE = torch.mean(torch.abs(a))
assert abs(result_mean.item() - 0.06_49) < 1E-3
def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]:
SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE = self.get_scheduler_config(thresholding=a , dynamic_thresholding_ratio=0)
SCREAMING_SNAKE_CASE = scheduler_class(**a)
SCREAMING_SNAKE_CASE = 10
SCREAMING_SNAKE_CASE = self.dummy_model()
SCREAMING_SNAKE_CASE = self.dummy_sample_deter.half()
scheduler.set_timesteps(a)
for i, t in enumerate(scheduler.timesteps):
SCREAMING_SNAKE_CASE = model(a , a)
SCREAMING_SNAKE_CASE = scheduler.step(a , a , a).prev_sample
assert sample.dtype == torch.float16
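
# Added minimal sketch of the save_config/from_pretrained round trip the tests
# above lean on (uses the imports at the top of this module; the config value is
# illustrative):
_sched = DPMSolverSinglestepScheduler(num_train_timesteps=1000)
with tempfile.TemporaryDirectory() as _tmpdirname:
    _sched.save_config(_tmpdirname)
    _reloaded = DPMSolverSinglestepScheduler.from_pretrained(_tmpdirname)
assert _reloaded.config.num_train_timesteps == 1000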
| 137 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class VivitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        num_frames=10,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class VivitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = VivitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = VivitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL videos
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], Image.Image)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], np.ndarray)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], torch.Tensor)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 137 | 1 |
"""simple docstring"""
def ugly_numbers(n: int) -> int:
    """Return the n-th ugly number (a number whose only prime factors are 2, 3 and 5)."""
    ugly_nums = [1]
    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5

    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]
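
# The sequence runs 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, ... so, as an added illustration:
assert ugly_numbers(1) == 1
assert ugly_numbers(10) == 12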
if __name__ == "__main__":
    from doctest import testmod

    testmod(verbose=True)
    print(f"{ugly_numbers(200) = }")
| 239 |
"""simple docstring"""
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class ZeroShotAudioClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused"
        )
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}],
        )
    @unittest.skip("No models are available in TF")
    def test_small_model_tf(self):
        pass
    @slow
    @require_torch
    def test_large_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification",
            model="laion/clap-htsat-unfused",
        )
        # This is an audio of a dog
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]

        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.999, "label": "Sound of a dog"},
                {"score": 0.001, "label": "Sound of vaccum cleaner"},
            ],
        )

        output = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

        output = audio_classifier(
            [audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"], batch_size=5
        )
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

    @unittest.skip("No models are available in TF")
    def test_large_model_tf(self):
        pass
| 239 | 1 |
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_ckpt", type=str, default="microsoft/unixcoder-base-nine")
    parser.add_argument("--num_epochs", type=int, default=5)
    parser.add_argument("--batch_size", type=int, default=6)
    parser.add_argument("--gradient_accumulation_steps", type=int, default=1)
    parser.add_argument("--freeze", type=bool, default=True)
    parser.add_argument("--learning_rate", type=float, default=5e-4)
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--lr_scheduler_type", type=str, default="cosine")
    parser.add_argument("--num_warmup_steps", type=int, default=10)
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--output_dir", type=str, default="./results")
    return parser.parse_args()
metric = load("accuracy")
def compute_metrics(eval_pred):
    predictions, labels = eval_pred
    predictions = np.argmax(predictions, axis=1)
    return metric.compute(predictions=predictions, references=labels)
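
if __name__ == "__main__":
    # Added sanity sketch for compute_metrics on made-up logits/labels; prints
    # {'accuracy': 0.5} since the first argmax is correct and the second is not.
    print(compute_metrics((np.array([[0.1, 0.9], [0.8, 0.2]]), np.array([1, 1]))))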
class CustomCallback(TrainerCallback):
    def __init__(self, trainer) -> None:
        super().__init__()
        self._trainer = trainer

    def on_epoch_end(self, args, state, control, **kwargs):
        if control.should_evaluate:
            control_copy = deepcopy(control)
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset, metric_key_prefix="train")
            return control_copy
def main():
    args = get_args()
    set_seed(args.seed)

    dataset = load_dataset("codeparrot/codecomplex", split="train")
    train_test = dataset.train_test_split(test_size=0.2)
    test_validation = train_test["test"].train_test_split(test_size=0.5)
    train_test_validation = DatasetDict(
        {
            "train": train_test["train"],
            "test": test_validation["train"],
            "valid": test_validation["test"],
        }
    )

    print("Loading tokenizer and model")
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7)
    model.config.pad_token_id = model.config.eos_token_id

    if args.freeze:
        for param in model.roberta.parameters():
            param.requires_grad = False

    labels = ClassLabel(num_classes=7, names=list(set(train_test_validation["train"]["complexity"])))

    def tokenize(example):
        inputs = tokenizer(example["src"], truncation=True, max_length=1024)
        label = labels.str2int(example["complexity"])
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }

    tokenized_datasets = train_test_validation.map(
        tokenize,
        batched=True,
        remove_columns=train_test_validation["train"].column_names,
    )
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer)

    training_args = TrainingArguments(
        output_dir=args.output_dir,
        learning_rate=args.learning_rate,
        lr_scheduler_type=args.lr_scheduler_type,
        evaluation_strategy="epoch",
        save_strategy="epoch",
        logging_strategy="epoch",
        per_device_train_batch_size=args.batch_size,
        per_device_eval_batch_size=args.batch_size,
        num_train_epochs=args.num_epochs,
        gradient_accumulation_steps=args.gradient_accumulation_steps,
        weight_decay=0.01,
        metric_for_best_model="accuracy",
        run_name="complexity-java",
        report_to="wandb",
    )

    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"],
        eval_dataset=tokenized_datasets["valid"],
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    print("Training...")
    trainer.add_callback(CustomCallback(trainer))
    trainer.train()
if __name__ == "__main__":
    main()
| 32 |
"""simple docstring"""
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class DownloadConfig:
    cache_dir: Optional[Union[str, Path]] = None
    force_download: bool = False
    resume_download: bool = False
    local_files_only: bool = False
    proxies: Optional[Dict] = None
    user_agent: Optional[str] = None
    extract_compressed_file: bool = False
    force_extract: bool = False
    delete_extracted: bool = False
    use_etag: bool = True
    num_proc: Optional[int] = None
    max_retries: int = 1
    use_auth_token: Optional[Union[str, bool]] = None
    ignore_url_params: bool = False
    storage_options: Optional[Dict] = None
    download_desc: Optional[str] = None

    def copy(self) -> "DownloadConfig":
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
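
# Usage sketch (added): copy() deep-copies every field, so nested mutations on the
# copy do not leak back into the original config.
_cfg = DownloadConfig(storage_options={"anon": True})
_cfg_copy = _cfg.copy()
_cfg_copy.storage_options["anon"] = False
assert _cfg.storage_options["anon"] is True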
| 217 | 0 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionDiffEditPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionDiffEditPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"}
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
def UpperCamelCase ( self ):
torch.manual_seed(0 )
A__ = UNetaDConditionModel(
block_out_channels=(32, 64),layers_per_block=2,sample_size=32,in_channels=4,out_channels=4,down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D'''),up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D'''),cross_attention_dim=32,attention_head_dim=(2, 4),use_linear_projection=__lowerCamelCase,)
A__ = DDIMScheduler(
beta_start=0.00085,beta_end=0.012,beta_schedule='''scaled_linear''',clip_sample=__lowerCamelCase,set_alpha_to_one=__lowerCamelCase,)
A__ = DDIMInverseScheduler(
beta_start=0.00085,beta_end=0.012,beta_schedule='''scaled_linear''',clip_sample=__lowerCamelCase,set_alpha_to_zero=__lowerCamelCase,)
torch.manual_seed(0 )
A__ = AutoencoderKL(
block_out_channels=[32, 64],in_channels=3,out_channels=3,down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],latent_channels=4,sample_size=128,)
torch.manual_seed(0 )
A__ = CLIPTextConfig(
bos_token_id=0,eos_token_id=2,hidden_size=32,intermediate_size=37,layer_norm_eps=1E-05,num_attention_heads=4,num_hidden_layers=5,pad_token_id=1,vocab_size=1000,hidden_act='''gelu''',projection_dim=512,)
A__ = CLIPTextModel(__lowerCamelCase )
A__ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
A__ = {
'''unet''': unet,
'''scheduler''': scheduler,
'''inverse_scheduler''': inverse_scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase=0 ):
A__ = floats_tensor((1, 16, 16),rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase )
A__ = floats_tensor((1, 2, 4, 16, 16),rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase )
if str(__lowerCamelCase ).startswith('''mps''' ):
A__ = torch.manual_seed(__lowerCamelCase )
else:
A__ = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
A__ = {
'''prompt''': '''a dog and a newt''',
'''mask_image''': mask,
'''image_latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 2,
'''inpaint_strength''': 1.0,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase=0 ):
A__ = floats_tensor((1, 3, 32, 32),rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase )
A__ = image.cpu().permute(0,2,3,1 )[0]
A__ = Image.fromarray(np.uinta(__lowerCamelCase ) ).convert('''RGB''' )
if str(__lowerCamelCase ).startswith('''mps''' ):
A__ = torch.manual_seed(__lowerCamelCase )
else:
A__ = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
A__ = {
'''image''': image,
'''source_prompt''': '''a cat and a frog''',
'''target_prompt''': '''a dog and a newt''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''num_maps_per_mask''': 2,
'''mask_encode_strength''': 1.0,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase=0 ):
A__ = floats_tensor((1, 3, 32, 32),rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase )
A__ = image.cpu().permute(0,2,3,1 )[0]
A__ = Image.fromarray(np.uinta(__lowerCamelCase ) ).convert('''RGB''' )
if str(__lowerCamelCase ).startswith('''mps''' ):
A__ = torch.manual_seed(__lowerCamelCase )
else:
A__ = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
A__ = {
'''image''': image,
'''prompt''': '''a cat and a frog''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''inpaint_strength''': 1.0,
'''guidance_scale''': 6.0,
'''decode_latents''': True,
'''output_type''': '''numpy''',
}
return inputs
def UpperCamelCase ( self ):
if not hasattr(self.pipeline_class,'''_optional_components''' ):
return
A__ = self.get_dummy_components()
A__ = self.pipeline_class(**__lowerCamelCase )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(__lowerCamelCase,__lowerCamelCase,__lowerCamelCase )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
A__ = self.get_dummy_inputs(__lowerCamelCase )
A__ = pipe(**__lowerCamelCase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(__lowerCamelCase )
A__ = self.pipeline_class.from_pretrained(__lowerCamelCase )
pipe_loaded.to(__lowerCamelCase )
pipe_loaded.set_progress_bar_config(disable=__lowerCamelCase )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(__lowerCamelCase,__lowerCamelCase ) is None,f"`{optional_component}` did not stay set to None after loading.",)
A__ = self.get_dummy_inputs(__lowerCamelCase )
A__ = pipe_loaded(**__lowerCamelCase )[0]
A__ = np.abs(output - output_loaded ).max()
self.assertLess(__lowerCamelCase,1E-4 )
def UpperCamelCase ( self ):
A__ = '''cpu'''
A__ = self.get_dummy_components()
A__ = self.pipeline_class(**__lowerCamelCase )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
A__ = self.get_dummy_mask_inputs(__lowerCamelCase )
A__ = pipe.generate_mask(**__lowerCamelCase )
A__ = mask[0, -3:, -3:]
self.assertEqual(mask.shape,(1, 16, 16) )
A__ = np.array([0] * 9 )
A__ = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__lowerCamelCase,1E-3 )
self.assertEqual(mask[0, -3, -4],0 )
def UpperCamelCase ( self ):
A__ = '''cpu'''
A__ = self.get_dummy_components()
A__ = self.pipeline_class(**__lowerCamelCase )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
A__ = self.get_dummy_inversion_inputs(__lowerCamelCase )
A__ = pipe.invert(**__lowerCamelCase ).images
A__ = image[0, -1, -3:, -3:]
self.assertEqual(image.shape,(2, 32, 32, 3) )
A__ = np.array(
[0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799],)
A__ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__lowerCamelCase,1E-3 )
def UpperCamelCase ( self ):
super().test_inference_batch_single_identical(expected_max_diff=5E-3 )
def UpperCamelCase ( self ):
A__ = '''cpu'''
A__ = self.get_dummy_components()
A__ = {'''beta_start''': 0.00085, '''beta_end''': 0.012, '''beta_schedule''': '''scaled_linear'''}
A__ = DPMSolverMultistepScheduler(**__lowerCamelCase )
A__ = DPMSolverMultistepInverseScheduler(**__lowerCamelCase )
A__ = self.pipeline_class(**__lowerCamelCase )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
A__ = self.get_dummy_inversion_inputs(__lowerCamelCase )
A__ = pipe.invert(**__lowerCamelCase ).images
A__ = image[0, -1, -3:, -3:]
self.assertEqual(image.shape,(2, 32, 32, 3) )
A__ = np.array(
[0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799],)
A__ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__lowerCamelCase,1E-3 )
@require_torch_gpu
@slow
class StableDiffusionDiffEditPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @classmethod
    def setUpClass(cls):
        raw_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png"
        )
        raw_image = raw_image.convert("RGB").resize((768, 768))
        cls.raw_image = raw_image
    def test_stable_diffusion_diffedit_full(self):
A__ = torch.manual_seed(0 )
A__ = StableDiffusionDiffEditPipeline.from_pretrained(
'''stabilityai/stable-diffusion-2-1''',safety_checker=__lowerCamelCase,torch_dtype=torch.floataa )
A__ = DDIMScheduler.from_config(pipe.scheduler.config )
A__ = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=__lowerCamelCase )
A__ = '''a bowl of fruit'''
A__ = '''a bowl of pears'''
A__ = pipe.generate_mask(
image=self.raw_image,source_prompt=__lowerCamelCase,target_prompt=__lowerCamelCase,generator=__lowerCamelCase,)
A__ = pipe.invert(
prompt=__lowerCamelCase,image=self.raw_image,inpaint_strength=0.7,generator=__lowerCamelCase ).latents
A__ = pipe(
prompt=__lowerCamelCase,mask_image=__lowerCamelCase,image_latents=__lowerCamelCase,generator=__lowerCamelCase,negative_prompt=__lowerCamelCase,inpaint_strength=0.7,output_type='''numpy''',).images[0]
A__ = (
np.array(
load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/diffedit/pears.png''' ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
    def test_stable_diffusion_diffedit_dpm(self):
A__ = torch.manual_seed(0 )
A__ = StableDiffusionDiffEditPipeline.from_pretrained(
'''stabilityai/stable-diffusion-2-1''',safety_checker=__lowerCamelCase,torch_dtype=torch.floataa )
A__ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
A__ = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=__lowerCamelCase )
A__ = '''a bowl of fruit'''
A__ = '''a bowl of pears'''
A__ = pipe.generate_mask(
image=self.raw_image,source_prompt=__lowerCamelCase,target_prompt=__lowerCamelCase,generator=__lowerCamelCase,)
A__ = pipe.invert(
prompt=__lowerCamelCase,image=self.raw_image,inpaint_strength=0.7,generator=__lowerCamelCase,num_inference_steps=25,).latents
A__ = pipe(
prompt=__lowerCamelCase,mask_image=__lowerCamelCase,image_latents=__lowerCamelCase,generator=__lowerCamelCase,negative_prompt=__lowerCamelCase,inpaint_strength=0.7,num_inference_steps=25,output_type='''numpy''',).images[0]
A__ = (
np.array(
load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/diffedit/pears.png''' ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
| 39 |
def solution() -> int:
    """Count the Sundays that fell on the first of the month from 1901 to 2000."""
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 1901
    sundays = 0

    while year < 2001:
        day += 7

        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]

        if month > 12:
            year += 1
            month = 1

        if year < 2001 and day == 1:
            sundays += 1
    return sundays


if __name__ == "__main__":
    print(solution())
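    # Added cross-check with the standard library: date.weekday() == 6 is Sunday.
    from datetime import date

    assert solution() == sum(
        1
        for year in range(1901, 2001)
        for month in range(1, 13)
        if date(year, month, 1).weekday() == 6
    )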
| 39 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base")

        features = {
            "input_ids": tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]], dtype=tf.int32),  # "My dog is cute"
            "attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]], dtype=tf.int32),
        }

        output = model(features)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [0.0681762, 0.10894451, 0.06772504],
                    [-0.06423668, 0.02366615, 0.04329344],
                    [-0.06057295, 0.09974135, -0.00070584],
                ]
            ],
            dtype=tf.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 109 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _lowerCamelCase(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class _lowerCamelCase(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class _lowerCamelCase(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class _lowerCamelCase(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
| 331 | 0 |
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNet2DModel
api = HfApi()

results = {}
# fmt: off
UpperCamelCase_ = torch.tensor([
-0.7_5_1_5, -1.6_8_8_3, 0.2_4_2_0, 0.0_3_0_0, 0.6_3_4_7, 1.3_4_3_3, -1.1_7_4_3, -3.7_4_6_7,
1.2_3_4_2, -2.2_4_8_5, 0.4_6_3_6, 0.8_0_7_6, -0.7_9_9_1, 0.3_9_6_9, 0.8_4_9_8, 0.9_1_8_9,
-1.8_8_8_7, -3.3_5_2_2, 0.7_6_3_9, 0.2_0_4_0, 0.6_2_7_1, -2.7_1_4_8, -1.6_3_1_6, 3.0_8_3_9,
0.3_1_8_6, 0.2_7_2_1, -0.9_7_5_9, -1.2_4_6_1, 2.6_2_5_7, 1.3_5_5_7
])
UpperCamelCase_ = torch.tensor([
-2.3_6_3_9, -2.5_3_4_4, 0.0_0_5_4, -0.6_6_7_4, 1.5_9_9_0, 1.0_1_5_8, 0.3_1_2_4, -2.1_4_3_6,
1.8_7_9_5, -2.5_4_2_9, -0.1_5_6_6, -0.3_9_7_3, 1.2_4_9_0, 2.6_4_4_7, 1.2_2_8_3, -0.5_2_0_8,
-2.8_1_5_4, -3.5_1_1_9, 2.3_8_3_8, 1.2_0_3_3, 1.7_2_0_1, -2.1_2_5_6, -1.4_5_7_6, 2.7_9_4_8,
2.4_2_0_4, -0.9_7_5_2, -1.2_5_4_6, 0.8_0_2_7, 3.2_7_5_8, 3.1_3_6_5
])
UpperCamelCase_ = torch.tensor([
-0.6_5_3_1, -0.6_8_9_1, -0.3_1_7_2, -0.5_3_7_5, -0.9_1_4_0, -0.5_3_6_7, -0.1_1_7_5, -0.7_8_6_9,
-0.3_8_0_8, -0.4_5_1_3, -0.2_0_9_8, -0.0_0_8_3, 0.3_1_8_3, 0.5_1_4_0, 0.2_2_4_7, -0.1_3_0_4,
-0.1_3_0_2, -0.2_8_0_2, -0.2_0_8_4, -0.2_0_2_5, -0.4_9_6_7, -0.4_8_7_3, -0.0_8_6_1, 0.6_9_2_5,
0.0_2_5_0, 0.1_2_9_0, -0.1_5_4_3, 0.6_3_1_6, 1.0_4_6_0, 1.4_9_4_3
])
UpperCamelCase_ = torch.tensor([
0.0_9_1_1, 0.1_1_0_7, 0.0_1_8_2, 0.0_4_3_5, -0.0_8_0_5, -0.0_6_0_8, 0.0_3_8_1, 0.2_1_7_2,
-0.0_2_8_0, 0.1_3_2_7, -0.0_2_9_9, -0.0_2_5_5, -0.0_0_5_0, -0.1_1_7_0, -0.1_0_4_6, 0.0_3_0_9,
0.1_3_6_7, 0.1_7_2_8, -0.0_5_3_3, -0.0_7_4_8, -0.0_5_3_4, 0.1_6_2_4, 0.0_3_8_4, -0.1_8_0_5,
-0.0_7_0_7, 0.0_6_4_2, 0.0_2_2_0, -0.0_1_3_4, -0.1_3_3_3, -0.1_5_0_5
])
UpperCamelCase_ = torch.tensor([
0.1_3_2_1, 0.1_3_3_7, 0.0_4_4_0, 0.0_6_2_2, -0.0_5_9_1, -0.0_3_7_0, 0.0_5_0_3, 0.2_1_3_3,
-0.0_1_7_7, 0.1_4_1_5, -0.0_1_1_6, -0.0_1_1_2, 0.0_0_4_4, -0.0_9_8_0, -0.0_7_8_9, 0.0_3_9_5,
0.1_5_0_2, 0.1_7_8_5, -0.0_4_8_8, -0.0_5_1_4, -0.0_4_0_4, 0.1_5_3_9, 0.0_4_5_4, -0.1_5_5_9,
-0.0_6_6_5, 0.0_6_5_9, 0.0_3_8_3, -0.0_0_0_5, -0.1_2_6_6, -0.1_3_8_6
])
UpperCamelCase_ = torch.tensor([
0.1_1_5_4, 0.1_2_1_8, 0.0_3_0_7, 0.0_5_2_6, -0.0_7_1_1, -0.0_5_4_1, 0.0_3_6_6, 0.2_0_7_8,
-0.0_2_6_7, 0.1_3_1_7, -0.0_2_2_6, -0.0_1_9_3, -0.0_0_1_4, -0.1_0_5_5, -0.0_9_0_2, 0.0_3_3_0,
0.1_3_9_1, 0.1_7_0_9, -0.0_5_6_2, -0.0_6_9_3, -0.0_5_6_0, 0.1_4_8_2, 0.0_3_8_1, -0.1_6_8_3,
-0.0_6_8_1, 0.0_6_6_1, 0.0_3_3_1, -0.0_0_4_6, -0.1_2_6_8, -0.1_4_3_1
])
UpperCamelCase_ = torch.tensor([
0.1_1_9_2, 0.1_2_4_0, 0.0_4_1_4, 0.0_6_0_6, -0.0_5_5_7, -0.0_4_1_2, 0.0_4_3_0, 0.2_0_4_2,
-0.0_2_0_0, 0.1_3_8_5, -0.0_1_1_5, -0.0_1_3_2, 0.0_0_1_7, -0.0_9_6_5, -0.0_8_0_2, 0.0_3_9_8,
0.1_4_3_3, 0.1_7_4_7, -0.0_4_5_8, -0.0_5_3_3, -0.0_4_0_7, 0.1_5_4_5, 0.0_4_1_9, -0.1_5_7_4,
-0.0_6_4_5, 0.0_6_2_6, 0.0_3_4_1, -0.0_0_1_0, -0.1_1_9_9, -0.1_3_9_0
])
UpperCamelCase_ = torch.tensor([
0.1_0_7_5, 0.1_0_7_4, 0.0_2_0_5, 0.0_4_3_1, -0.0_7_7_4, -0.0_6_0_7, 0.0_2_9_8, 0.2_0_4_2,
-0.0_3_2_0, 0.1_2_6_7, -0.0_2_8_1, -0.0_2_5_0, -0.0_0_6_4, -0.1_0_9_1, -0.0_9_4_6, 0.0_2_9_0,
0.1_3_2_8, 0.1_6_5_0, -0.0_5_8_0, -0.0_7_3_8, -0.0_5_8_6, 0.1_4_4_0, 0.0_3_3_7, -0.1_7_4_6,
-0.0_7_1_2, 0.0_6_0_5, 0.0_2_5_0, -0.0_0_9_9, -0.1_3_1_6, -0.1_4_7_3
])
UpperCamelCase_ = torch.tensor([
-1.4_5_7_2, -2.0_4_8_1, -0.0_4_1_4, -0.6_0_0_5, 1.4_1_3_6, 0.5_8_4_8, 0.4_0_2_8, -2.7_3_3_0,
1.2_2_1_2, -2.1_2_2_8, 0.2_1_5_5, 0.4_0_3_9, 0.7_6_6_2, 2.0_5_3_5, 0.7_4_7_7, -0.3_2_4_3,
-2.1_7_5_8, -2.7_6_4_8, 1.6_9_4_7, 0.7_0_2_6, 1.2_3_3_8, -1.6_0_7_8, -0.8_6_8_2, 2.2_8_1_0,
1.8_5_7_4, -0.5_7_1_8, -0.5_5_8_6, -0.0_1_8_6, 2.3_4_1_5, 2.1_2_5_1])
UpperCamelCase_ = torch.tensor([
-1.3_6_9_0, -1.9_7_2_0, -0.4_0_9_0, -0.6_9_6_6, 1.4_6_6_0, 0.9_9_3_8, -0.1_3_8_5, -2.7_3_2_4,
0.7_7_3_6, -1.8_9_1_7, 0.2_9_2_3, 0.4_2_9_3, 0.1_6_9_3, 1.4_1_1_2, 1.1_8_8_7, -0.3_1_8_1,
-2.2_1_6_0, -2.6_3_8_1, 1.3_1_7_0, 0.8_1_6_3, 0.9_2_4_0, -1.6_5_4_4, -0.6_0_9_9, 2.5_2_5_9,
1.6_4_3_0, -0.9_0_9_0, -0.9_3_9_2, -0.0_1_2_6, 2.4_2_6_8, 2.3_2_6_6
])
UpperCamelCase_ = torch.tensor([
-1.3_5_2_5, -1.9_6_2_8, -0.3_9_5_6, -0.6_8_6_0, 1.4_6_6_4, 1.0_0_1_4, -0.1_2_5_9, -2.7_2_1_2,
0.7_7_7_2, -1.8_8_1_1, 0.2_9_9_6, 0.4_3_8_8, 0.1_7_0_4, 1.4_0_2_9, 1.1_7_0_1, -0.3_0_2_7,
-2.2_0_5_3, -2.6_2_8_7, 1.3_3_5_0, 0.8_1_3_1, 0.9_2_7_4, -1.6_2_9_2, -0.6_0_9_8, 2.5_1_3_1,
1.6_5_0_5, -0.8_9_5_8, -0.9_2_9_8, -0.0_1_5_1, 2.4_2_5_7, 2.3_3_5_5
])
UpperCamelCase_ = torch.tensor([
-2.0_5_8_5, -2.7_8_9_7, -0.2_8_5_0, -0.8_9_4_0, 1.9_0_5_2, 0.5_7_0_2, 0.6_3_4_5, -3.8_9_5_9,
1.5_9_3_2, -3.2_3_1_9, 0.1_9_7_4, 0.0_2_8_7, 1.7_5_6_6, 2.6_5_4_3, 0.8_3_8_7, -0.5_3_5_1,
-3.2_7_3_6, -4.3_3_7_5, 2.9_0_2_9, 1.6_3_9_0, 1.4_6_4_0, -2.1_7_0_1, -1.9_0_1_3, 2.9_3_4_1,
3.4_9_8_1, -0.6_2_5_5, -1.1_6_4_4, -0.1_5_9_1, 3.7_0_9_7, 3.2_0_6_6
])
UpperCamelCase_ = torch.tensor([
-2.3_1_3_9, -2.5_5_9_4, -0.0_1_9_7, -0.6_7_8_5, 1.7_0_0_1, 1.1_6_0_6, 0.3_0_7_5, -2.1_7_4_0,
1.8_0_7_1, -2.5_6_3_0, -0.0_9_2_6, -0.3_8_1_1, 1.2_1_1_6, 2.6_2_4_6, 1.2_7_3_1, -0.5_3_9_8,
-2.8_1_5_3, -3.6_1_4_0, 2.3_8_9_3, 1.3_2_6_2, 1.6_2_5_8, -2.1_8_5_6, -1.3_2_6_7, 2.8_3_9_5,
2.3_7_7_9, -1.0_6_2_3, -1.2_4_6_8, 0.8_9_5_9, 3.3_3_6_7, 3.2_2_4_3
])
UpperCamelCase_ = torch.tensor([
-2.0_6_2_8, -2.7_6_6_7, -0.2_0_8_9, -0.8_2_6_3, 2.0_5_3_9, 0.5_9_9_2, 0.6_4_9_5, -3.8_3_3_6,
1.6_0_2_5, -3.2_8_1_7, 0.1_7_2_1, -0.0_6_3_3, 1.7_5_1_6, 2.7_0_3_9, 0.8_1_0_0, -0.5_9_0_8,
-3.2_1_1_3, -4.4_3_4_3, 2.9_2_5_7, 1.3_6_3_2, 1.5_5_6_2, -2.1_4_8_9, -1.9_8_9_4, 3.0_5_6_0,
3.3_3_9_6, -0.7_3_2_8, -1.0_4_1_7, 0.0_3_8_3, 3.7_0_9_3, 3.2_3_4_3
])
UpperCamelCase_ = torch.tensor([
-1.4_5_7_4, -2.0_5_6_9, -0.0_4_7_3, -0.6_1_1_7, 1.4_0_1_8, 0.5_7_6_9, 0.4_1_2_9, -2.7_3_4_4,
1.2_2_4_1, -2.1_3_9_7, 0.2_0_0_0, 0.3_9_3_7, 0.7_6_1_6, 2.0_4_5_3, 0.7_3_2_4, -0.3_3_9_1,
-2.1_7_4_6, -2.7_7_4_4, 1.6_9_6_3, 0.6_9_2_1, 1.2_1_8_7, -1.6_1_7_2, -0.8_8_7_7, 2.2_4_3_9,
1.8_4_7_1, -0.5_8_3_9, -0.5_6_0_5, -0.0_4_6_4, 2.3_2_5_0, 2.1_2_1_9
])
# fmt: on
models = api.list_models(filter="diffusers")
for mod in models:
    if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
        local_checkpoint = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1]
        print(f"Started running {mod.modelId}!!!")

        if mod.modelId.startswith("CompVis"):
            model = UNet2DModel.from_pretrained(local_checkpoint, subfolder="unet")
        else:
            model = UNet2DModel.from_pretrained(local_checkpoint)

        torch.manual_seed(0)
        random.seed(0)

        noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        time_step = torch.tensor([10] * noise.shape[0])
        with torch.no_grad():
            logits = model(noise, time_step).sample

        assert torch.allclose(
            logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1e-3
        )
        print(f"{mod.modelId} has passed successfully!!!")
| 371 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
UpperCamelCase_ = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = True, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
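A small end-to-end smoke test of the processor above; this is an illustrative sketch (random pixels, no checkpoint or download needed):

import numpy as np

processor = CLIPImageProcessor()
dummy = (np.random.rand(480, 640, 3) * 255).astype(np.uint8)  # HWC uint8 image
batch = processor(images=dummy, return_tensors="np")  # BaseImageProcessor.__call__ delegates to preprocess
print(batch["pixel_values"].shape)  # (1, 3, 224, 224) after shortest-edge resize + center crop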
| 59 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import GPT2Tokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class TFOPTModelTester:
    config_cls = OPTConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        embed_dim=16,
        word_embed_proj_dim=16,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.embed_dim = embed_dim
        self.word_embed_proj_dim = word_embed_proj_dim
        self.is_encoder_decoder = False
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            embed_dim=self.embed_dim,
            word_embed_proj_dim=self.word_embed_proj_dim,
            is_encoder_decoder=False,
            **self.config_updates,
        )
        inputs_dict = prepare_opt_inputs_dict(config, input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFOPTModel(config=config)
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
@require_tf
class TFOPTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_onnx = False
    onnx_min_opset = 10
    def setUp(self):
        self.model_tester = TFOPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OPTConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_resize_token_embeddings(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def _get_word_embedding_weight(model, embedding_layer):
            if hasattr(embedding_layer, "weight"):
                return embedding_layer.weight
            else:
                # Here we build the word embeddings weights if not exists.
                # And then we retry to get the attribute once built.
                model.build()
                if hasattr(embedding_layer, "weight"):
                    return embedding_layer.weight
                else:
                    return None

        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10]:
                # build the embeddings
                model = model_class(config=config)
                old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # reshape the embeddings
                model.resize_token_embeddings(size)
                new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # check that the resized embeddings size matches the desired size.
                assert_size = size if size is not None else config.vocab_size
                self.assertEqual(new_input_embeddings.shape[0], assert_size)

                # check that weights remain the same after resizing
                models_equal = True
                for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                        models_equal = False
                self.assertTrue(models_equal)

                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0], assert_size)

                    models_equal = True
                    for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                            models_equal = False
                    self.assertTrue(models_equal)
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)
@require_tf
class TFOPTHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        eos_column_vector = tf.ones((4, 1), dtype=tf.int32) * 2
        input_ids = tf.concat([ids_tensor((4, 6), self.vocab_size - 3) + 3, eos_column_vector], axis=1)
        batch_size = input_ids.shape[0]
        config = OPTConfig(
            vocab_size=self.vocab_size,
            hidden_size=24,
            num_hidden_layers=2,
            num_attention_heads=2,
            ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class TFOPTModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFOPTModel.from_pretrained("facebook/opt-350m")
        input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.not_equal(input_ids, model.config.pad_token_id)
        with tf.GradientTape():
            output = model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
        expected_shape = (1, 11, 512)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]]
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-3))

        xla_generate = tf.function(model, jit_compile=True)
        output = xla_generate(input_ids, attention_mask)[0]
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-2))
@require_tf
@slow
class TFOPTEmbeddingsTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.path_model = "facebook/opt-350m"

    def test_logits(self):
        model = TFOPTForCausalLM.from_pretrained(self.path_model)
        tokenizer = GPT2Tokenizer.from_pretrained(self.path_model)

        prompts = [
            "Today is a beautiful day and I want to",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]
        # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
        inputs = tokenizer(prompts, return_tensors="tf", padding=True, add_special_tokens=False)
        logits = tf.math.reduce_mean(model(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        logits_meta = tf.constant(
            [
                [1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
                [-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
                [0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
                [6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
            ]
        )
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))

        xla_generate = tf.function(model, jit_compile=True)
        logits = tf.math.reduce_mean(xla_generate(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))
@require_tf
@slow
class TFOPTGenerationTest(unittest.TestCase):
    @property
    def prompts(self):
        return [
            "Today is a beautiful day and I want",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]

    def test_generation_pre_attn_layer_norm(self):
        model_id = "facebook/opt-125m"

        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of New York, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]

        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids

            generated_ids = model.generate(input_ids, max_length=10)

            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)

    def test_batch_generation(self):
        model_id = "facebook/opt-350m"

        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"])

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs["attention_mask"][-1], tf.int64)
        )
        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit of a dork.\nI'm a little bit",
            "Today, I was in the middle of a conversation with a friend about the",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(batch_out_sentence, [non_padded_sentence, padded_sentence])

    def test_generation_post_attn_layer_norm(self):
        model_id = "facebook/opt-350m"

        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of San Francisco, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]

        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids

            generated_ids = model.generate(input_ids, max_length=10)

            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
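The core pattern exercised by check_decoder_model_past_large_inputs above can be distilled into a few lines. This is an illustrative sketch rather than part of the test suite; it assumes a TF causal model that supports use_cache and past_key_values:

def sketch_cache_equivalence(model, input_ids, next_tokens, attention_mask, next_attn_mask):
    # Run the full sequence in one pass...
    full_ids = tf.concat([input_ids, next_tokens], axis=-1)
    full_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)
    logits_full = model(full_ids, attention_mask=full_mask)[0]
    # ...then the same sequence incrementally with cached key/values.
    past = model(input_ids, attention_mask=attention_mask, use_cache=True).past_key_values
    logits_inc = model(next_tokens, attention_mask=full_mask, past_key_values=past)[0]
    # The newly decoded positions must agree between the two paths.
    tf.debugging.assert_near(logits_full[:, -next_tokens.shape[1]:], logits_inc, rtol=1e-3)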
| 268 |
"""simple docstring"""
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
('''albert''', '''FlaxAlbertModel'''),
('''bart''', '''FlaxBartModel'''),
('''beit''', '''FlaxBeitModel'''),
('''bert''', '''FlaxBertModel'''),
('''big_bird''', '''FlaxBigBirdModel'''),
('''blenderbot''', '''FlaxBlenderbotModel'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallModel'''),
('''clip''', '''FlaxCLIPModel'''),
('''distilbert''', '''FlaxDistilBertModel'''),
('''electra''', '''FlaxElectraModel'''),
('''gpt-sw3''', '''FlaxGPT2Model'''),
('''gpt2''', '''FlaxGPT2Model'''),
('''gpt_neo''', '''FlaxGPTNeoModel'''),
('''gptj''', '''FlaxGPTJModel'''),
('''longt5''', '''FlaxLongT5Model'''),
('''marian''', '''FlaxMarianModel'''),
('''mbart''', '''FlaxMBartModel'''),
('''mt5''', '''FlaxMT5Model'''),
('''opt''', '''FlaxOPTModel'''),
('''pegasus''', '''FlaxPegasusModel'''),
('''regnet''', '''FlaxRegNetModel'''),
('''resnet''', '''FlaxResNetModel'''),
('''roberta''', '''FlaxRobertaModel'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormModel'''),
('''roformer''', '''FlaxRoFormerModel'''),
('''t5''', '''FlaxT5Model'''),
('''vision-text-dual-encoder''', '''FlaxVisionTextDualEncoderModel'''),
('''vit''', '''FlaxViTModel'''),
('''wav2vec2''', '''FlaxWav2Vec2Model'''),
('''whisper''', '''FlaxWhisperModel'''),
('''xglm''', '''FlaxXGLMModel'''),
('''xlm-roberta''', '''FlaxXLMRobertaModel'''),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('''albert''', '''FlaxAlbertForPreTraining'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForPreTraining'''),
('''big_bird''', '''FlaxBigBirdForPreTraining'''),
('''electra''', '''FlaxElectraForPreTraining'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
('''wav2vec2''', '''FlaxWav2Vec2ForPreTraining'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('''albert''', '''FlaxAlbertForMaskedLM'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForMaskedLM'''),
('''big_bird''', '''FlaxBigBirdForMaskedLM'''),
('''distilbert''', '''FlaxDistilBertForMaskedLM'''),
('''electra''', '''FlaxElectraForMaskedLM'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''blenderbot''', '''FlaxBlenderbotForConditionalGeneration'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallForConditionalGeneration'''),
('''encoder-decoder''', '''FlaxEncoderDecoderModel'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''marian''', '''FlaxMarianMTModel'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''pegasus''', '''FlaxPegasusForConditionalGeneration'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        # Model for Image-classification
('''beit''', '''FlaxBeitForImageClassification'''),
('''regnet''', '''FlaxRegNetForImageClassification'''),
('''resnet''', '''FlaxResNetForImageClassification'''),
('''vit''', '''FlaxViTForImageClassification'''),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('''vision-encoder-decoder''', '''FlaxVisionEncoderDecoderModel'''),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('''bart''', '''FlaxBartForCausalLM'''),
('''bert''', '''FlaxBertForCausalLM'''),
('''big_bird''', '''FlaxBigBirdForCausalLM'''),
('''electra''', '''FlaxElectraForCausalLM'''),
('''gpt-sw3''', '''FlaxGPT2LMHeadModel'''),
('''gpt2''', '''FlaxGPT2LMHeadModel'''),
('''gpt_neo''', '''FlaxGPTNeoForCausalLM'''),
('''gptj''', '''FlaxGPTJForCausalLM'''),
('''opt''', '''FlaxOPTForCausalLM'''),
('''roberta''', '''FlaxRobertaForCausalLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForCausalLM'''),
('''xglm''', '''FlaxXGLMForCausalLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForCausalLM'''),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('''albert''', '''FlaxAlbertForSequenceClassification'''),
('''bart''', '''FlaxBartForSequenceClassification'''),
('''bert''', '''FlaxBertForSequenceClassification'''),
('''big_bird''', '''FlaxBigBirdForSequenceClassification'''),
('''distilbert''', '''FlaxDistilBertForSequenceClassification'''),
('''electra''', '''FlaxElectraForSequenceClassification'''),
('''mbart''', '''FlaxMBartForSequenceClassification'''),
('''roberta''', '''FlaxRobertaForSequenceClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForSequenceClassification'''),
('''roformer''', '''FlaxRoFormerForSequenceClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForSequenceClassification'''),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('''albert''', '''FlaxAlbertForQuestionAnswering'''),
('''bart''', '''FlaxBartForQuestionAnswering'''),
('''bert''', '''FlaxBertForQuestionAnswering'''),
('''big_bird''', '''FlaxBigBirdForQuestionAnswering'''),
('''distilbert''', '''FlaxDistilBertForQuestionAnswering'''),
('''electra''', '''FlaxElectraForQuestionAnswering'''),
('''mbart''', '''FlaxMBartForQuestionAnswering'''),
('''roberta''', '''FlaxRobertaForQuestionAnswering'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForQuestionAnswering'''),
('''roformer''', '''FlaxRoFormerForQuestionAnswering'''),
('''xlm-roberta''', '''FlaxXLMRobertaForQuestionAnswering'''),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
('''albert''', '''FlaxAlbertForTokenClassification'''),
('''bert''', '''FlaxBertForTokenClassification'''),
('''big_bird''', '''FlaxBigBirdForTokenClassification'''),
('''distilbert''', '''FlaxDistilBertForTokenClassification'''),
('''electra''', '''FlaxElectraForTokenClassification'''),
('''roberta''', '''FlaxRobertaForTokenClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForTokenClassification'''),
('''roformer''', '''FlaxRoFormerForTokenClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForTokenClassification'''),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
('''albert''', '''FlaxAlbertForMultipleChoice'''),
('''bert''', '''FlaxBertForMultipleChoice'''),
('''big_bird''', '''FlaxBigBirdForMultipleChoice'''),
('''distilbert''', '''FlaxDistilBertForMultipleChoice'''),
('''electra''', '''FlaxElectraForMultipleChoice'''),
('''roberta''', '''FlaxRobertaForMultipleChoice'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMultipleChoice'''),
('''roformer''', '''FlaxRoFormerForMultipleChoice'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMultipleChoice'''),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
('''bert''', '''FlaxBertForNextSentencePrediction'''),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('''speech-encoder-decoder''', '''FlaxSpeechEncoderDecoderModel'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
('''whisper''', '''FlaxWhisperForAudioClassification'''),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)


class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
| 301 | 0 |
"""simple docstring"""
import flax.linen as nn
import jax
import jax.numpy as jnp
class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states,
            shape=(batch, height * 2, width * 2, channels),
            method="nearest",
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(2, 2),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)

        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels,
                kernel_size=(1, 1),
                strides=(1, 1),
                padding="VALID",
                dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
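A minimal shape check for the modules above; this is an illustrative sketch (module names as defined in this file, NHWC inputs as Flax's nn.Conv expects):

rng = jax.random.PRNGKey(0)
x = jnp.ones((1, 8, 8, 4))  # NHWC input
block = FlaxUpsample2D(out_channels=4)
params = block.init(rng, x)
y = block.apply(params, x)
print(y.shape)  # (1, 16, 16, 4): height and width doubled by nearest-neighbor resize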
| 370 |
"""simple docstring"""
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implement only because lightning requires to do so
    def forward(self):
        pass


def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model, longformer_question_answering_ckpt_path, pytorch_dump_folder_path
):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--longformer_model",
default=None,
type=str,
required=True,
help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
)
parser.add_argument(
"--longformer_question_answering_ckpt_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch Lightning Checkpoint.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
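For reference, the converter above can also be invoked programmatically; a hedged sketch with placeholder paths (the model identifier follows the help text in the argument parser):

# Hypothetical programmatic invocation (paths are placeholders, not real files).
convert_longformer_qa_checkpoint_to_pytorch(
    "longformer-base-4096",
    "/path/to/lightning_checkpoint.ckpt",
    "/path/to/output_dir",
)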
| 108 | 0 |
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    def forward(self, input_ids, token_type_ids, attention_mask):
        return None


class FuncNonContiguousArgs:
    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None
class OnnxExportTestCase(unittest.TestCase):
    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ("bert-base-cased", {}),
        ("gpt2", {"use_cache": False}),  # We don't support exporting GPT2 past keys anymore
    ]
    @require_tf
    @slow
    def test_export_tensorflow(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "tf", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "pt", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_custom_bert_model(self):
        from transformers import BertModel

        vocab = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
        with NamedTemporaryFile(mode="w+t") as vocab_file:
            vocab_file.write("\n".join(vocab))
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name)

        with TemporaryDirectory() as bert_save_dir:
            model = BertModel(BertConfig(vocab_size=len(vocab)))
            model.save_pretrained(bert_save_dir)
            self._test_export(bert_save_dir, "pt", 12, tokenizer)

    @require_tf
    @slow
    def test_quantize_tf(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "tf", 12, **model_kwargs)
            quantized_path = quantize(Path(path))

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    @require_torch
    @slow
    def test_quantize_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "pt", 12, **model_kwargs)
            quantized_path = quantize(path)

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")
    def _test_export(self, model, framework, opset, tokenizer=None, **model_kwargs):
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir).joinpath("model.onnx")

            # Remove folder if exists
            if path.parent.exists():
                path.parent.rmdir()

            # Export
            convert(framework, model, path, opset, tokenizer, **model_kwargs)

            return path
        except Exception as e:
            self.fail(e)
    @require_torch
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_pytorch(self):
        from transformers import BertModel

        model = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "pt")

    @require_tf
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_tf(self):
        from transformers import TFBertModel

        model = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "tf")

    def _test_infer_dynamic_axis(self, model, tokenizer, framework):
        nlp = FeatureExtractionPipeline(model, tokenizer)

        variable_names = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp, framework)

        # Assert all variables are present
        self.assertEqual(len(shapes), len(variable_names))
        self.assertTrue(all(var_name in shapes for var_name in variable_names))
        self.assertSequenceEqual(variable_names[:3], input_vars)
        self.assertSequenceEqual(variable_names[3:], output_vars)

        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name], {0: "batch", 1: "sequence"})

        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes["output_0"], {0: "batch", 1: "sequence"})
        self.assertDictEqual(shapes["output_1"], {0: "batch"})
    def test_ensure_valid_input(self):
        tokens_order = ["input_ids", "attention_mask", "token_type_ids"]
        tokens = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
        ordered_input_names, inputs_args = ensure_valid_input(FuncContiguousArgs(), tokens, tokens_order)

        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args), 3)

        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names), set(tokens_order))

        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args, (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]))

        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names, inputs_args = ensure_valid_input(FuncNonContiguousArgs(), tokens, tokens_order)

        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(ordered_input_names), 1)
        self.assertEqual(len(inputs_args), 1)

        # Should have only "input_ids"
        self.assertEqual(inputs_args[0], tokens["input_ids"])
        self.assertEqual(ordered_input_names[0], "input_ids")

    def test_generate_identified_filename(self):
        generated = generate_identified_filename(Path("/home/something/my_fake_model.onnx"), "-test")
        self.assertEqual("/home/something/my_fake_model-test.onnx", generated.as_posix())
| 25 |
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())
def upgrade_state_dict(state_dict, codebook_state_dict):
    upgrade = {}
    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue

        key = key.replace("heads.cmd.mim_head.cls.predictions", "mmm_image_head")
        key = key.replace("heads.cmd.mlm_head.cls.predictions", "mmm_text_head")
        key = key.replace("heads.cmd.itm_head.cls", "itm_head")
        key = key.replace("heads.cmd.itm_head.pooler", "itm_head.pooler")
        key = key.replace("heads.cmd.clip_head.logit_scale", "flava.logit_scale")
        key = key.replace("heads.fairseq_mlm.cls.predictions", "mlm_head")
        key = key.replace("heads.imagenet.mim_head.cls.predictions", "mim_head")
        key = key.replace("mm_text_projection", "flava.text_to_mm_projection")
        key = key.replace("mm_image_projection", "flava.image_to_mm_projection")
        key = key.replace("image_encoder.module", "flava.image_model")
        key = key.replace("text_encoder.module", "flava.text_model")
        key = key.replace("mm_encoder.module.encoder.cls_token", "flava.multimodal_model.cls_token")
        key = key.replace("mm_encoder.module", "flava.multimodal_model")
        key = key.replace("text_projection", "flava.text_projection")
        key = key.replace("image_projection", "flava.image_projection")

        upgrade[key] = value.float()

    for key, value in codebook_state_dict.items():
        upgrade[f"image_codebook.{key}"] = value

    return upgrade
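A tiny self-contained check of the parameter-counting helper above; the state dict keys here are made up purely for illustration:

# Dummy state dict exercising the "encoder.embeddings" skip rule in count_parameters.
sd = {"encoder.embeddings.weight": torch.ones(2, 2), "head.weight": torch.ones(3)}
total = count_parameters(sd)
# The embeddings key is skipped, so only head.weight's sum (3.0) is counted.
print(total)  # tensor(3.)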
@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None):
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path)
    else:
        config = FlavaConfig()

    hf_model = FlavaForPreTraining(config).eval()

    codebook_state_dict = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False)

    if os.path.exists(checkpoint_path):
        state_dict = torch.load(checkpoint_path, map_location="cpu")
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path, map_location="cpu")

    hf_state_dict = upgrade_state_dict(state_dict, codebook_state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict) + count_parameters(codebook_state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
parser.add_argument('--codebook_path', default=None, type=str, help='Path to flava codebook checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    args = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
| 25 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape(tensor_list):
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:])
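A quick self-check of the helper above (plain torch tensors; nothing pipeline-specific is assumed):

# check_same_shape is True only when every tensor in the list shares one common shape.
assert check_same_shape([torch.zeros(1, 3), torch.ones(1, 3)])
assert not check_same_shape([torch.zeros(1, 3), torch.zeros(3, 1)])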
class StableDiffusionLatentUpscalePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionLatentUpscalePipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "height",
        "width",
        "cross_attention_kwargs",
        "negative_prompt_embeds",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    test_cpu_offload = True
    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 4
        sizes = (16, 16)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
    def get_dummy_components(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            act_fn="gelu",
            attention_head_dim=8,
            norm_num_groups=None,
            block_out_channels=[32, 32, 64, 64],
            time_cond_proj_dim=160,
            conv_in_kernel=1,
            conv_out_kernel=1,
            cross_attention_dim=32,
            down_block_types=(
                "KDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
            ),
            in_channels=8,
            mid_block_type=None,
            only_cross_attention=False,
            out_channels=5,
            resnet_time_scale_shift="scale_shift",
            time_embedding_type="fourier",
            timestep_post_act="gelu",
            up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D"),
        )
        vae = AutoencoderKL(
            block_out_channels=[32, 32, 64, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=[
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
            ],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        scheduler = EulerDiscreteScheduler(prediction_type="sample")
        text_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="quick_gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": model.eval(),
            "vae": vae.eval(),
            "scheduler": scheduler,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": self.dummy_image.cpu(),
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 256, 256, 3))
        expected_slice = np.array(
            [0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=7e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=3e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=7e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)
def __lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
__lowercase =[
'DDIMScheduler',
'DDPMScheduler',
'PNDMScheduler',
'HeunDiscreteScheduler',
'EulerAncestralDiscreteScheduler',
'KDPM2DiscreteScheduler',
'KDPM2AncestralDiscreteScheduler',
'DPMSolverSDEScheduler',
]
__lowercase =self.get_dummy_components()
__lowercase =self.pipeline_class(**_lowerCAmelCase)
# make sure that PNDM does not need warm-up
pipe.scheduler.register_to_config(skip_prk_steps=_lowerCAmelCase)
pipe.to(_lowerCAmelCase)
pipe.set_progress_bar_config(disable=_lowerCAmelCase)
__lowercase =self.get_dummy_inputs(_lowerCAmelCase)
__lowercase =2
__lowercase =[]
for scheduler_enum in KarrasDiffusionSchedulers:
if scheduler_enum.name in skip_schedulers:
# schedulers in the skip list lack a sigma schedule and are not supported here
continue
__lowercase =getattr(_lowerCAmelCase , scheduler_enum.name)
__lowercase =scheduler_cls.from_config(pipe.scheduler.config)
__lowercase =pipe(**_lowerCAmelCase)[0]
outputs.append(_lowerCAmelCase)
assert check_same_shape(_lowerCAmelCase)
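# What the loop above checks, in brief: every scheduler in
# KarrasDiffusionSchedulers that is not in the skip list can be swapped into
# the pipeline via `from_config`, and all resulting outputs share one shape.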
@require_torch_gpu
@slow
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self : Tuple):
'''simple docstring'''
__lowercase =torch.manual_seed(3_3)
__lowercase =StableDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' , torch_dtype=torch.floataa)
pipe.to('cuda')
__lowercase =StableDiffusionLatentUpscalePipeline.from_pretrained(
'stabilityai/sd-x2-latent-upscaler' , torch_dtype=torch.floataa)
upscaler.to('cuda')
__lowercase ='a photo of an astronaut high resolution, unreal engine, ultra realistic'
__lowercase =pipe(_lowerCAmelCase , generator=_lowerCAmelCase , output_type='latent').images
__lowercase =upscaler(
prompt=_lowerCAmelCase , image=_lowerCAmelCase , num_inference_steps=2_0 , guidance_scale=0 , generator=_lowerCAmelCase , output_type='np' , ).images[0]
__lowercase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy')
assert np.abs((expected_image - image).mean()) < 5e-2
def __lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
__lowercase =torch.manual_seed(3_3)
__lowercase =StableDiffusionLatentUpscalePipeline.from_pretrained(
'stabilityai/sd-x2-latent-upscaler' , torch_dtype=torch.floataa)
upscaler.to('cuda')
__lowercase ='the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas'
__lowercase =load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png')
__lowercase =upscaler(
prompt=_lowerCAmelCase , image=_lowerCAmelCase , num_inference_steps=2_0 , guidance_scale=0 , generator=_lowerCAmelCase , output_type='np' , ).images[0]
__lowercase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy')
assert np.abs((expected_image - image).max()) < 5e-2
| 48 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
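# Note: the "*" in the mapped names above is a layer-index wildcard; it is
# filled in from the fairseq parameter name during weight loading (see the
# `mapped_key.replace('*' , ...)` call further below).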
lowerCamelCase = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def _A ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
for attribute in key.split('.' ):
__lowercase =getattr(_lowerCAmelCase , _lowerCAmelCase )
if weight_type is not None:
__lowercase =getattr(_lowerCAmelCase , _lowerCAmelCase ).shape
else:
__lowercase =hf_pointer.shape
assert hf_shape == value.shape, (
f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
__lowercase =value
elif weight_type == "weight_g":
__lowercase =value
elif weight_type == "weight_v":
__lowercase =value
elif weight_type == "bias":
__lowercase =value
else:
__lowercase =value
logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def _A ( _lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
__lowercase =[]
__lowercase =fairseq_model.state_dict()
__lowercase =hf_model.feature_extractor
# if the encoder has a different dim than the decoder -> use proj_weight
__lowercase =None
for name, value in fairseq_dict.items():
__lowercase =False
if "conv_layers" in name:
load_conv_layer(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , hf_model.config.feat_extract_norm == 'group' , )
__lowercase =True
elif name.split('.' )[0] == "proj":
__lowercase =fairseq_model.proj
__lowercase =True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
__lowercase =True
if "*" in mapped_key:
__lowercase =name.split(_lowerCAmelCase )[0].split('.' )[-2]
__lowercase =mapped_key.replace('*' , _lowerCAmelCase )
if "weight_g" in name:
__lowercase ='weight_g'
elif "weight_v" in name:
__lowercase ='weight_v'
elif "bias" in name:
__lowercase ='bias'
elif "weight" in name:
__lowercase ='weight'
else:
__lowercase =None
set_recursively(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
continue
if not is_used:
unused_weights.append(_lowerCAmelCase )
logger.warning(f"""Unused weights: {unused_weights}""" )
return proj_weight
def _A ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
__lowercase =full_name.split('conv_layers.' )[-1]
__lowercase =name.split('.' )
__lowercase =int(items[0] )
__lowercase =int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
__lowercase =value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
__lowercase =value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
__lowercase =value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
__lowercase =value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(_lowerCAmelCase )
def _A ( _lowerCAmelCase ):
"""simple docstring"""
__lowercase , __lowercase =emb.weight.shape
__lowercase =nn.Linear(_lowerCAmelCase , _lowerCAmelCase , bias=_lowerCAmelCase )
__lowercase =emb.weight.data
return lin_layer
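# The helper above implements the usual weight-tying trick for an LM head: a
# (vocab_size, emb_dim) embedding becomes a linear output layer that reuses
# the same weight matrix (presumably with the bias disabled, as in similar
# conversion scripts).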
def _A ( _lowerCAmelCase ):
"""simple docstring"""
with open(_lowerCAmelCase , 'r' , encoding='utf-8' ) as f:
__lowercase =f.readlines()
__lowercase =[line.split(' ' )[0] for line in lines]
__lowercase =len(_lowerCAmelCase )
__lowercase ={
'<s>': 0,
'<pad>': 1,
'</s>': 2,
'<unk>': 3,
}
vocab_dict.update(dict(zip(_lowerCAmelCase , range(4 , num_words + 4 ) ) ) )
return vocab_dict
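# Inferred input format: each line of the fairseq dict file is
# "<token> <count>", so only the first whitespace-separated field is kept;
# ids 0-3 are reserved for <s>, <pad>, </s> and <unk>, and words start at id 4.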
@torch.no_grad()
def _A ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ):
"""simple docstring"""
__lowercase =WavaVecaConfig.from_pretrained(_lowerCAmelCase )
__lowercase =SpeechaTextaConfig.from_pretrained(
_lowerCAmelCase , vocab_size=_lowerCAmelCase , decoder_layers=_lowerCAmelCase , do_stable_layer_norm=_lowerCAmelCase )
__lowercase =WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , )
__lowercase , __lowercase , __lowercase =fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
__lowercase =model[0].eval()
# set weights for wav2vec2 encoder
__lowercase =WavaVecaModel(_lowerCAmelCase )
__lowercase =recursively_load_weights_wavaveca(model.encoder , _lowerCAmelCase )
__lowercase =SpeechaTextaForCausalLM(_lowerCAmelCase )
__lowercase , __lowercase =hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=_lowerCAmelCase )
# set output linear layer
unexpected_keys.remove('embed_out' )
__lowercase =nn.Parameter(model.decoder.embed_out.detach() )
# layer norm is initialized to the identity matrix, so leaving it as-is is fine
logger.warning(f"""The following keys are missing when loading the decoder weights: {missing_keys}""" )
logger.warning(f"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""" )
__lowercase =SpeechEncoderDecoderModel(encoder=_lowerCAmelCase , decoder=_lowerCAmelCase )
__lowercase =False
# add projection layer
__lowercase =nn.Parameter(projection_layer.weight )
__lowercase =nn.Parameter(projection_layer.bias )
__lowercase =create_vocab_dict(_lowerCAmelCase )
with open(os.path.join(_lowerCAmelCase , 'vocab.json' ) , 'w' ) as fp:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
__lowercase =SpeechaTextaTokenizer(os.path.join(_lowerCAmelCase , 'vocab.json' ) )
tokenizer.save_pretrained(_lowerCAmelCase )
__lowercase =hf_wavavec.config.to_dict()
__lowercase =tokenizer.pad_token_id
__lowercase =tokenizer.bos_token_id
__lowercase =tokenizer.eos_token_id
__lowercase ='speech_to_text_2'
__lowercase ='wav2vec2'
__lowercase =SpeechEncoderDecoderConfig.from_dict(_lowerCAmelCase )
hf_wavavec.save_pretrained(_lowerCAmelCase )
feature_extractor.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument(
"""--encoder_config_path""",
default="""facebook/wav2vec2-large-lv60""",
type=str,
help="""Path to hf encoder wav2vec2 checkpoint config""",
)
parser.add_argument(
"""--decoder_config_path""",
default="""facebook/s2t-small-mustc-en-fr-st""",
type=str,
help="""Path to hf decoder s2t checkpoint config""",
)
parser.add_argument("""--vocab_size""", default=1_0224, type=int, help="""Vocab size of decoder""")
parser.add_argument("""--num_decoder_layers""", default=7, type=int, help="""Number of decoder layers""")
lowerCamelCase = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
| 48 | 1 |
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
__lowerCamelCase : Optional[int] = '''0.12''' # assumed parallelism: 8
if is_torch_available():
import torch
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : str , __UpperCamelCase : str , __UpperCamelCase : int=None ) -> Dict:
"""simple docstring"""
if rng is None:
SCREAMING_SNAKE_CASE__ = random.Random()
SCREAMING_SNAKE_CASE__ = 1
for dim in shape:
total_dims *= dim
SCREAMING_SNAKE_CASE__ = []
for _ in range(__UpperCamelCase ):
values.append(rng.randint(0 , vocab_size - 1 ) )
SCREAMING_SNAKE_CASE__ = np.array(__UpperCamelCase , dtype=jnp.intaa ).reshape(__UpperCamelCase )
return output
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[Any] , __UpperCamelCase : List[Any]=None ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ids_tensor(__UpperCamelCase , vocab_size=2 , rng=__UpperCamelCase )
# make sure that at least one token is attended to for each batch
SCREAMING_SNAKE_CASE__ = 1
return attn_mask
@require_flax
class __snake_case :
lowerCAmelCase_ = None
lowerCAmelCase_ = ()
def __a ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
# cut to half length & take max batch_size 2
SCREAMING_SNAKE_CASE__ = 2
SCREAMING_SNAKE_CASE__ = inputs["""input_ids"""].shape[-1] // 2
SCREAMING_SNAKE_CASE__ = inputs["""input_ids"""][:max_batch_size, :sequence_length]
SCREAMING_SNAKE_CASE__ = jnp.ones_like(_lowercase )
SCREAMING_SNAKE_CASE__ = attention_mask[:max_batch_size, :sequence_length]
# generate max 5 tokens
SCREAMING_SNAKE_CASE__ = input_ids.shape[-1] + 5
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
SCREAMING_SNAKE_CASE__ = config.eos_token_id
return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def __a ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = max_length
SCREAMING_SNAKE_CASE__ = 0
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE__ = model_class(_lowercase )
SCREAMING_SNAKE_CASE__ = model_class.__name__[4:] # Skip the "Flax" at the beginning
SCREAMING_SNAKE_CASE__ = getattr(_lowercase , _lowercase )
SCREAMING_SNAKE_CASE__ = pt_model_class(_lowercase ).eval()
SCREAMING_SNAKE_CASE__ = load_flax_weights_in_pytorch_model(_lowercase , flax_model.params )
SCREAMING_SNAKE_CASE__ = flax_model.generate(_lowercase ).sequences
SCREAMING_SNAKE_CASE__ = pt_model.generate(torch.tensor(_lowercase , dtype=torch.long ) )
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
SCREAMING_SNAKE_CASE__ = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() )
def __a ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = max_length
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE__ = model_class(_lowercase )
SCREAMING_SNAKE_CASE__ = model.generate(_lowercase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _lowercase )
SCREAMING_SNAKE_CASE__ = jit(model.generate )
SCREAMING_SNAKE_CASE__ = jit_generate(_lowercase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __a ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = max_length
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE__ = model_class(_lowercase )
SCREAMING_SNAKE_CASE__ = model.generate(_lowercase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _lowercase )
SCREAMING_SNAKE_CASE__ = jit(model.generate )
SCREAMING_SNAKE_CASE__ = jit_generate(_lowercase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __a ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = max_length
SCREAMING_SNAKE_CASE__ = 2
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE__ = model_class(_lowercase )
SCREAMING_SNAKE_CASE__ = model.generate(_lowercase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _lowercase )
SCREAMING_SNAKE_CASE__ = jit(model.generate )
SCREAMING_SNAKE_CASE__ = jit_generate(_lowercase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __a ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = max_length
SCREAMING_SNAKE_CASE__ = 2
SCREAMING_SNAKE_CASE__ = 2
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE__ = model_class(_lowercase )
SCREAMING_SNAKE_CASE__ = model.generate(_lowercase ).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )
def __a ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = max_length
SCREAMING_SNAKE_CASE__ = 0.8
SCREAMING_SNAKE_CASE__ = 10
SCREAMING_SNAKE_CASE__ = 0.3
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = 8
SCREAMING_SNAKE_CASE__ = 9
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE__ = model_class(_lowercase )
SCREAMING_SNAKE_CASE__ = model.generate(_lowercase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _lowercase )
SCREAMING_SNAKE_CASE__ = jit(model.generate )
SCREAMING_SNAKE_CASE__ = jit_generate(_lowercase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __a ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE__ = max_length
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = 8
SCREAMING_SNAKE_CASE__ = 9
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE__ = model_class(_lowercase )
SCREAMING_SNAKE_CASE__ = model.generate(_lowercase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _lowercase )
SCREAMING_SNAKE_CASE__ = jit(model.generate )
SCREAMING_SNAKE_CASE__ = jit_generate(_lowercase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __a ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE__ = max_length
SCREAMING_SNAKE_CASE__ = 2
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = 8
SCREAMING_SNAKE_CASE__ = 9
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE__ = model_class(_lowercase )
SCREAMING_SNAKE_CASE__ = model.generate(_lowercase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _lowercase )
SCREAMING_SNAKE_CASE__ = jit(model.generate )
SCREAMING_SNAKE_CASE__ = jit_generate(_lowercase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __a ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self._get_input_ids_and_config()
# pad attention mask on the left
SCREAMING_SNAKE_CASE__ = attention_mask.at[(0, 0)].set(0 )
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = max_length
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE__ = model_class(_lowercase )
SCREAMING_SNAKE_CASE__ = model.generate(_lowercase , attention_mask=_lowercase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _lowercase )
SCREAMING_SNAKE_CASE__ = jit(model.generate )
SCREAMING_SNAKE_CASE__ = jit_generate(_lowercase , attention_mask=_lowercase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __a ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self._get_input_ids_and_config()
# pad attention mask on the left
SCREAMING_SNAKE_CASE__ = attention_mask.at[(0, 0)].set(0 )
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = max_length
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE__ = model_class(_lowercase )
SCREAMING_SNAKE_CASE__ = model.generate(_lowercase , attention_mask=_lowercase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _lowercase )
SCREAMING_SNAKE_CASE__ = jit(model.generate )
SCREAMING_SNAKE_CASE__ = jit_generate(_lowercase , attention_mask=_lowercase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __a ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self._get_input_ids_and_config()
# pad attention mask on the left
SCREAMING_SNAKE_CASE__ = attention_mask.at[(0, 0)].set(0 )
SCREAMING_SNAKE_CASE__ = 2
SCREAMING_SNAKE_CASE__ = max_length
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE__ = model_class(_lowercase )
SCREAMING_SNAKE_CASE__ = model.generate(_lowercase , attention_mask=_lowercase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _lowercase )
SCREAMING_SNAKE_CASE__ = jit(model.generate )
SCREAMING_SNAKE_CASE__ = jit_generate(_lowercase , attention_mask=_lowercase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
@require_flax
class __snake_case ( unittest.TestCase ):
def __a ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-bert""" )
SCREAMING_SNAKE_CASE__ = FlaxAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""" )
SCREAMING_SNAKE_CASE__ = """Hello world"""
SCREAMING_SNAKE_CASE__ = tokenizer(_lowercase , return_tensors="""np""" ).input_ids
# typos are quickly detected (the correct argument is `do_sample`)
with self.assertRaisesRegex(_lowercase , """do_samples""" ):
model.generate(_lowercase , do_samples=_lowercase )
# arbitrary arguments that will not be used anywhere are also not accepted
with self.assertRaisesRegex(_lowercase , """foo""" ):
SCREAMING_SNAKE_CASE__ = {"""foo""": """bar"""}
model.generate(_lowercase , **_lowercase )
| 219 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__lowerCamelCase : Any = 16
__lowerCamelCase : List[Any] = 32
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : Accelerator , __UpperCamelCase : int = 16 , __UpperCamelCase : str = "bert-base-cased" ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__UpperCamelCase )
SCREAMING_SNAKE_CASE__ = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(__UpperCamelCase : Optional[Any] ):
# max_length=None => use the model max length (it's actually the default)
SCREAMING_SNAKE_CASE__ = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=__UpperCamelCase , max_length=__UpperCamelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
SCREAMING_SNAKE_CASE__ = datasets.map(
__UpperCamelCase , batched=__UpperCamelCase , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=__UpperCamelCase )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
SCREAMING_SNAKE_CASE__ = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(__UpperCamelCase : List[Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(__UpperCamelCase , padding="""max_length""" , max_length=1_28 , return_tensors="""pt""" )
return tokenizer.pad(__UpperCamelCase , padding="""longest""" , return_tensors="""pt""" )
# Instantiate dataloaders.
SCREAMING_SNAKE_CASE__ = DataLoader(
tokenized_datasets["""train"""] , shuffle=__UpperCamelCase , collate_fn=__UpperCamelCase , batch_size=__UpperCamelCase )
SCREAMING_SNAKE_CASE__ = DataLoader(
tokenized_datasets["""validation"""] , shuffle=__UpperCamelCase , collate_fn=__UpperCamelCase , batch_size=__UpperCamelCase )
return train_dataloader, eval_dataloader
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[int] ) -> List[str]:
"""simple docstring"""
model.eval()
SCREAMING_SNAKE_CASE__ = 0
for step, batch in enumerate(__UpperCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ = model(**__UpperCamelCase )
SCREAMING_SNAKE_CASE__ = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once rather than multiple times
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.gather(
(predictions, batch["""labels"""]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(__UpperCamelCase ) - 1:
SCREAMING_SNAKE_CASE__ = predictions[: len(eval_dataloader.dataset ) - samples_seen]
SCREAMING_SNAKE_CASE__ = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=__UpperCamelCase , references=__UpperCamelCase , )
SCREAMING_SNAKE_CASE__ = metric.compute()
return eval_metric["accuracy"]
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Any ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
SCREAMING_SNAKE_CASE__ = config["""lr"""]
SCREAMING_SNAKE_CASE__ = int(config["""num_epochs"""] )
SCREAMING_SNAKE_CASE__ = int(config["""seed"""] )
SCREAMING_SNAKE_CASE__ = int(config["""batch_size"""] )
SCREAMING_SNAKE_CASE__ = args.model_name_or_path
set_seed(__UpperCamelCase )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = get_dataloaders(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
SCREAMING_SNAKE_CASE__ = AutoModelForSequenceClassification.from_pretrained(__UpperCamelCase , return_dict=__UpperCamelCase )
# Instantiate optimizer
SCREAMING_SNAKE_CASE__ = (
AdamW
if accelerator.state.deepspeed_plugin is None
or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
SCREAMING_SNAKE_CASE__ = optimizer_cls(params=model.parameters() , lr=__UpperCamelCase )
if accelerator.state.deepspeed_plugin is not None:
SCREAMING_SNAKE_CASE__ = accelerator.state.deepspeed_plugin.deepspeed_config[
"""gradient_accumulation_steps"""
]
else:
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = (len(__UpperCamelCase ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
SCREAMING_SNAKE_CASE__ = get_linear_schedule_with_warmup(
optimizer=__UpperCamelCase , num_warmup_steps=0 , num_training_steps=__UpperCamelCase , )
else:
SCREAMING_SNAKE_CASE__ = DummyScheduler(__UpperCamelCase , total_num_steps=__UpperCamelCase , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.prepare(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# We need to keep track of how many total steps we have iterated over
SCREAMING_SNAKE_CASE__ = 0
# We also need to keep track of the starting epoch so files are named properly
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = evaluate.load("""glue""" , """mrpc""" )
SCREAMING_SNAKE_CASE__ = num_epochs
if args.partial_train_epoch is not None:
SCREAMING_SNAKE_CASE__ = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
SCREAMING_SNAKE_CASE__ = args.resume_from_checkpoint.split("""epoch_""" )[1]
SCREAMING_SNAKE_CASE__ = """"""
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
SCREAMING_SNAKE_CASE__ = int(__UpperCamelCase ) + 1
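# e.g. resuming from a folder named "epoch_12" makes the digit-prefix loop
# above produce "12", so training restarts at epoch 13.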
SCREAMING_SNAKE_CASE__ = evaluation_loop(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
accelerator.print("""resumed checkpoint performance:""" , __UpperCamelCase )
accelerator.print("""resumed checkpoint's scheduler's lr:""" , lr_scheduler.get_lr()[0] )
accelerator.print("""resumed optimizers's lr:""" , optimizer.param_groups[0]["""lr"""] )
with open(os.path.join(args.output_dir , f"""state_{starting_epoch-1}.json""" ) , """r""" ) as f:
SCREAMING_SNAKE_CASE__ = json.load(__UpperCamelCase )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
SCREAMING_SNAKE_CASE__ = {}
for epoch in range(__UpperCamelCase , __UpperCamelCase ):
model.train()
for step, batch in enumerate(__UpperCamelCase ):
SCREAMING_SNAKE_CASE__ = model(**__UpperCamelCase )
SCREAMING_SNAKE_CASE__ = outputs.loss
SCREAMING_SNAKE_CASE__ = loss / gradient_accumulation_steps
accelerator.backward(__UpperCamelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
SCREAMING_SNAKE_CASE__ = f"""epoch_{epoch}"""
SCREAMING_SNAKE_CASE__ = os.path.join(args.output_dir , __UpperCamelCase )
accelerator.save_state(__UpperCamelCase )
SCREAMING_SNAKE_CASE__ = evaluation_loop(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
SCREAMING_SNAKE_CASE__ = accuracy
SCREAMING_SNAKE_CASE__ = lr_scheduler.get_lr()[0]
SCREAMING_SNAKE_CASE__ = optimizer.param_groups[0]["""lr"""]
SCREAMING_SNAKE_CASE__ = epoch
SCREAMING_SNAKE_CASE__ = overall_step
accelerator.print(f"""epoch {epoch}:""" , __UpperCamelCase )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , f"""state_{epoch}.json""" ) , """w""" ) as f:
json.dump(__UpperCamelCase , __UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser(description="""Simple example of a training script with checkpointing and resumption.""" )
parser.add_argument(
"""--model_name_or_path""" , type=__UpperCamelCase , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=__UpperCamelCase , )
parser.add_argument(
"""--output_dir""" , type=__UpperCamelCase , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
parser.add_argument(
"""--resume_from_checkpoint""" , type=__UpperCamelCase , default=__UpperCamelCase , help="""If the training should continue from a checkpoint folder.""" , )
parser.add_argument(
"""--partial_train_epoch""" , type=__UpperCamelCase , default=__UpperCamelCase , help="""If passed, the training will stop after this number of epochs.""" , )
parser.add_argument(
"""--num_epochs""" , type=__UpperCamelCase , default=2 , help="""Number of train epochs.""" , )
SCREAMING_SNAKE_CASE__ = parser.parse_args()
SCREAMING_SNAKE_CASE__ = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16}
training_function(__UpperCamelCase , __UpperCamelCase )
if __name__ == "__main__":
main()
| 219 | 1 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
__lowerCamelCase = ["""gpt2"""]
__lowerCamelCase = """gpt2"""
if is_tf_available():
class UpperCAmelCase ( tf.Module ):
def __init__(self : Union[str, Any] , snake_case__ : str ) -> Tuple:
'''simple docstring'''
super().__init__()
snake_case : int = tokenizer
snake_case : Any = AutoConfig.from_pretrained(snake_case__ )
snake_case : Optional[int] = TFGPTaLMHeadModel.from_config(snake_case__ )
@tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name="text" ),) )
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] , snake_case__ : Optional[int] ) -> int:
'''simple docstring'''
snake_case : str = self.tokenizer(snake_case__ )
snake_case : Any = tokenized["input_ids"].to_tensor()
snake_case : Union[str, Any] = tf.cast(input_ids_dense > 0 , tf.intaa )
# input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
snake_case : Dict = self.model(input_ids=snake_case__ , attention_mask=snake_case__ )["logits"]
return outputs
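# Note: `serving` above derives the attention mask by treating token id 0 as
# padding (`input_ids_dense > 0`). That is safe for these test inputs, though
# id 0 in the real GPT-2 vocabulary is also the token "!".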
@require_tf
@require_keras_nlp
class UpperCAmelCase ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE (self : Tuple ) -> Dict:
'''simple docstring'''
super().setUp()
snake_case : List[Any] = [GPTaTokenizer.from_pretrained(snake_case__ ) for checkpoint in (TOKENIZER_CHECKPOINTS)]
snake_case : List[Any] = [TFGPTaTokenizer.from_pretrained(snake_case__ ) for checkpoint in TOKENIZER_CHECKPOINTS]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
snake_case : Dict = [
"This is a straightforward English test sentence.",
"This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
"Now we're going to add some Chinese: 一 二 三 一二三",
"And some much more rare Chinese: 齉 堃 齉堃",
"Je vais aussi écrire en français pour tester les accents",
"Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
]
snake_case : Optional[Any] = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> List[str]:
'''simple docstring'''
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in self.test_sentences:
snake_case : Tuple = tokenizer([test_inputs] , return_tensors="tf" )
snake_case : List[Any] = tf_tokenizer([test_inputs] )
for key in python_outputs.keys():
# convert them to numpy to avoid messing with ragged tensors
snake_case : int = python_outputs[key].numpy()
snake_case : Optional[Any] = tf_outputs[key].numpy()
self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
self.assertTrue(tf.reduce_all(tf.cast(snake_case__ , tf.intaa ) == tf_outputs_values ) )
@slow
def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> int:
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
snake_case : Union[str, Any] = tf.function(snake_case__ )
for test_inputs in self.test_sentences:
snake_case : Dict = tf.constant(snake_case__ )
snake_case : int = compiled_tokenizer(snake_case__ )
snake_case : List[str] = tf_tokenizer(snake_case__ )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def _SCREAMING_SNAKE_CASE (self : str ) -> str:
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
snake_case : List[str] = ModelToSave(tokenizer=snake_case__ )
snake_case : List[Any] = tf.convert_to_tensor([self.test_sentences[0]] )
snake_case : List[Any] = model.serving(snake_case__ ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
snake_case : List[str] = Path(snake_case__ ) / "saved.model"
tf.saved_model.save(snake_case__ , snake_case__ , signatures={"serving_default": model.serving} )
snake_case : int = tf.saved_model.load(snake_case__ )
snake_case : int = loaded_model.signatures["serving_default"](snake_case__ )["output_0"]
# The loaded model is compiled, but tokenizer outputs are integer ids, so exact equality is expected
self.assertTrue(tf.reduce_all(out == loaded_output ) )
@slow
def _SCREAMING_SNAKE_CASE (self : Dict ) -> Union[str, Any]:
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
snake_case : Any = tf.convert_to_tensor([self.test_sentences[0]] )
snake_case : List[str] = tf_tokenizer(snake_case__ ) # Build model with some sample inputs
snake_case : Any = tf_tokenizer.get_config()
snake_case : int = TFGPTaTokenizer.from_config(snake_case__ )
snake_case : Union[str, Any] = model_from_config(snake_case__ )
for key in from_config_output.keys():
self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )
@slow
def _SCREAMING_SNAKE_CASE (self : int ) -> List[str]:
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
# for the test to run
snake_case : str = 12_31_23
for max_length in [3, 5, 10_24]:
snake_case : List[str] = tf.convert_to_tensor([self.test_sentences[0]] )
snake_case : List[Any] = tf_tokenizer(snake_case__ , max_length=snake_case__ )
snake_case : List[str] = out["input_ids"].numpy().shape[1]
assert out_length == max_length
| 10 |
from __future__ import annotations
def UpperCamelCase ( __lowerCamelCase : list[int] ):
snake_case : Optional[int] = len(__lowerCamelCase ) // 2
# choose the middle 3 elements
snake_case : str = lst[m - 1 : m + 2]
# if middle element is peak
if three[1] > three[0] and three[1] > three[2]:
return three[1]
# if increasing, recurse on right
elif three[0] < three[2]:
if len(lst[:m] ) == 2:
m -= 1
return peak(lst[m:] )
# decreasing
else:
if len(lst[:m] ) == 2:
m += 1
return peak(lst[:m] )
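# Minimal usage sketch (assumes the list strictly increases and then strictly
# decreases):
# peak([1, 2, 3, 4, 5, 4, 3, 2, 1]) -> 5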
if __name__ == "__main__":
import doctest
doctest.testmod()
| 10 | 1 |
"""simple docstring"""
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
__UpperCamelCase : Optional[int] = ['''small''', '''medium''', '''large''']
__UpperCamelCase : List[Any] = '''lm_head.decoder.weight'''
__UpperCamelCase : Tuple = '''lm_head.weight'''
def __SCREAMING_SNAKE_CASE ( A_ , A_ ):
lowerCAmelCase__ : Dict = torch.load(A_ )
lowerCAmelCase__ : int = d.pop(A_ )
os.makedirs(A_ , exist_ok=A_ )
torch.save(A_ , os.path.join(A_ , A_ ) )
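# In short: the converter appears to load a DialoGPT checkpoint, move the
# value stored under "lm_head.decoder.weight" to "lm_head.weight" (the key
# transformers expects), and re-save the state dict under WEIGHTS_NAME.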
if __name__ == "__main__":
__UpperCamelCase : List[str] = argparse.ArgumentParser()
parser.add_argument('''--dialogpt_path''', default='''.''', type=str)
__UpperCamelCase : Union[str, Any] = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
__UpperCamelCase : Optional[int] = os.path.join(args.dialogpt_path, F'''{MODEL}_ft.pkl''')
__UpperCamelCase : Any = F'''./DialoGPT-{MODEL}'''
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
| 106 |
"""simple docstring"""
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
_lowercase : Tuple = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
'''simple docstring'''
_a = ['pixel_values']
def __init__( self : Optional[Any], lowerCamelCase : bool = True, lowerCamelCase : Union[int, float] = 1 / 255, lowerCamelCase : bool = True, lowerCamelCase : int = 8, **lowerCamelCase : Tuple, )-> None:
super().__init__(**lowerCamelCase )
lowerCamelCase__ : int =do_rescale
lowerCamelCase__ : Dict =rescale_factor
lowerCamelCase__ : Union[str, Any] =do_pad
lowerCamelCase__ : Union[str, Any] =pad_size
def snake_case ( self : int, lowerCamelCase : np.ndarray, lowerCamelCase : float, lowerCamelCase : Optional[Union[str, ChannelDimension]] = None, **lowerCamelCase : int )-> np.ndarray:
return rescale(lowerCamelCase, scale=lowerCamelCase, data_format=lowerCamelCase, **lowerCamelCase )
def snake_case ( self : Optional[Any], lowerCamelCase : np.ndarray, lowerCamelCase : int, lowerCamelCase : Optional[Union[str, ChannelDimension]] = None )-> List[Any]:
lowerCamelCase__ , lowerCamelCase__ : Optional[int] =get_image_size(lowerCamelCase )
lowerCamelCase__ : List[str] =(old_height // size + 1) * size - old_height
lowerCamelCase__ : List[str] =(old_width // size + 1) * size - old_width
return pad(lowerCamelCase, ((0, pad_height), (0, pad_width)), mode='''symmetric''', data_format=lowerCamelCase )
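# Note: the pad amounts above always round *up* to the next multiple of
# `size`; e.g. an edge that is already 8 pixels with size=8 is padded by a
# further 8 pixels to 16 rather than left unchanged.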
def snake_case ( self : List[Any], lowerCamelCase : ImageInput, lowerCamelCase : Optional[bool] = None, lowerCamelCase : Optional[float] = None, lowerCamelCase : Optional[bool] = None, lowerCamelCase : Optional[int] = None, lowerCamelCase : Optional[Union[str, TensorType]] = None, lowerCamelCase : Union[str, ChannelDimension] = ChannelDimension.FIRST, **lowerCamelCase : Union[str, Any], )-> Dict:
lowerCamelCase__ : List[str] =do_rescale if do_rescale is not None else self.do_rescale
lowerCamelCase__ : Tuple =rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCamelCase__ : str =do_pad if do_pad is not None else self.do_pad
lowerCamelCase__ : int =pad_size if pad_size is not None else self.pad_size
lowerCamelCase__ : Optional[int] =make_list_of_images(lowerCamelCase )
if not valid_images(lowerCamelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
# All transformations expect numpy arrays.
lowerCamelCase__ : Tuple =[to_numpy_array(lowerCamelCase ) for image in images]
if do_rescale:
lowerCamelCase__ : Tuple =[self.rescale(image=lowerCamelCase, scale=lowerCamelCase ) for image in images]
if do_pad:
lowerCamelCase__ : Tuple =[self.pad(lowerCamelCase, size=lowerCamelCase ) for image in images]
lowerCamelCase__ : int =[to_channel_dimension_format(lowerCamelCase, lowerCamelCase ) for image in images]
lowerCamelCase__ : Dict ={'''pixel_values''': images}
return BatchFeature(data=lowerCamelCase, tensor_type=lowerCamelCase )
| 238 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class lowercase ( A__ , unittest.TestCase ):
"""simple docstring"""
_a = CTRLTokenizer
_a = False
_a = False
def lowerCAmelCase__ ( self ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCamelCase__ :Optional[int] = ['''adapt''', '''re@@''', '''a@@''', '''apt''', '''c@@''', '''t''', '''<unk>''']
UpperCamelCase__ :List[str] = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) )
UpperCamelCase__ :int = ['''#version: 0.2''', '''a p''', '''ap t</w>''', '''r e''', '''a d''', '''ad apt</w>''', '''''']
UpperCamelCase__ :List[str] = {'''unk_token''': '''<unk>'''}
UpperCamelCase__ :List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
UpperCamelCase__ :Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(UpperCamelCase_ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(UpperCamelCase_ ) )
def lowerCAmelCase__ ( self , **UpperCamelCase_ ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Any = '''adapt react readapt apt'''
UpperCamelCase__ :List[Any] = '''adapt react readapt apt'''
return input_text, output_text
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
UpperCamelCase__ :Union[str, Any] = '''adapt react readapt apt'''
UpperCamelCase__ :Tuple = '''adapt re@@ a@@ c@@ t re@@ adapt apt'''.split()
UpperCamelCase__ :Tuple = tokenizer.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase__ :Optional[Any] = tokens + [tokenizer.unk_token]
UpperCamelCase__ :int = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , UpperCamelCase_ )
| 219 |
'''simple docstring'''
from __future__ import annotations
__snake_case = [True] * 1000001
__snake_case = 2
while i * i <= 1000000:
if seive[i]:
for j in range(i * i, 1000001, i):
__snake_case = False
i += 1
def a ( __a ) -> bool:
'''simple docstring'''
return seive[n]
def a ( __a ) -> bool:
'''simple docstring'''
return any(digit in '''02468''' for digit in str(__a ) )
def a ( __a = 1000000 ) -> list[int]:
'''simple docstring'''
UpperCamelCase__ :Any = [2] # result already includes the number 2.
for num in range(3 , limit + 1 , 2 ):
if is_prime(__a ) and not contains_an_even_digit(__a ):
UpperCamelCase__ :str = str(__a )
UpperCamelCase__ :List[str] = [int(str_num[j:] + str_num[:j] ) for j in range(len(__a ) )]
if all(is_prime(__a ) for i in list_nums ):
result.append(__a )
return result
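# Example: 197 is circular because all rotations (197, 971, 719) are prime.
# The even-digit filter above is sound: some rotation puts the even digit in
# the ones place, making that rotation divisible by 2 and hence not prime
# (2 itself is handled by hardcoding it into `result`).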
def a ( ) -> int:
'''simple docstring'''
return len(find_circular_primes() )
if __name__ == "__main__":
print(F"""{len(find_circular_primes()) = }""") | 219 | 1 |
"""simple docstring"""
import mpmath # for roots of unity
import numpy as np
class a :
def __init__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[Any]=None , __SCREAMING_SNAKE_CASE : List[Any]=None ) -> Any:
# Input as list
lowerCamelCase_ = list(poly_a or [0] )[:]
lowerCamelCase_ = list(poly_b or [0] )[:]
# Remove leading zero coefficients
while self.polyA[-1] == 0:
self.polyA.pop()
lowerCamelCase_ = len(self.polyA )
while self.polyB[-1] == 0:
self.polyB.pop()
lowerCamelCase_ = len(self.polyB )
# Add 0 to make lengths equal a power of 2
lowerCamelCase_ = int(
2 ** np.ceil(np.loga(len(self.polyA ) + len(self.polyB ) - 1 ) ) )
while len(self.polyA ) < self.c_max_length:
self.polyA.append(0 )
while len(self.polyB ) < self.c_max_length:
self.polyB.append(0 )
# A complex root used for the fourier transform
lowerCamelCase_ = complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) )
# The product
lowerCamelCase_ = self.__multiply()
def UpperCamelCase ( self : int , __SCREAMING_SNAKE_CASE : int ) -> Optional[Any]:
lowerCamelCase_ = [[x] for x in self.polyA] if which == 'A' else [[x] for x in self.polyB]
# Corner case
if len(__SCREAMING_SNAKE_CASE ) <= 1:
return dft[0]
lowerCamelCase_ = self.c_max_length // 2
while next_ncol > 0:
lowerCamelCase_ = [[] for i in range(__SCREAMING_SNAKE_CASE )]
lowerCamelCase_ = self.root**next_ncol
# First half of next step
lowerCamelCase_ = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(__SCREAMING_SNAKE_CASE ):
new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
current_root *= root
# Second half of next step
lowerCamelCase_ = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(__SCREAMING_SNAKE_CASE ):
new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
current_root *= root
# Update
lowerCamelCase_ = new_dft
lowerCamelCase_ = next_ncol // 2
return dft[0]
def UpperCamelCase ( self : List[Any] ) -> List[str]:
lowerCamelCase_ = self.__dft('A' )
lowerCamelCase_ = self.__dft('B' )
lowerCamelCase_ = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
del dft_a
del dft_b
# Corner Case
if len(inverce_c[0] ) <= 1:
return inverce_c[0]
# Inverse DFT
lowerCamelCase_ = 2
while next_ncol <= self.c_max_length:
lowerCamelCase_ = [[] for i in range(__SCREAMING_SNAKE_CASE )]
lowerCamelCase_ = self.root ** (next_ncol // 2)
lowerCamelCase_ = 1
# First half of next step
for j in range(self.c_max_length // next_ncol ):
for i in range(next_ncol // 2 ):
# Even positions
new_inverse_c[i].append(
(
inverce_c[i][j]
+ inverce_c[i][j + self.c_max_length // next_ncol]
)
/ 2 )
# Odd positions
new_inverse_c[i + next_ncol // 2].append(
(
inverce_c[i][j]
- inverce_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root) )
current_root *= root
# Update
lowerCamelCase_ = new_inverse_c
next_ncol *= 2
# Unpack
lowerCamelCase_ = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1J for x in inverce_c]
# Remove leading 0's
while inverce_c[-1] == 0:
inverce_c.pop()
return inverce_c
def __str__( self : Union[str, Any] ) -> Dict:
lowerCamelCase_ = 'A = ' + ' + '.join(
F'''{coef}*x^{i}''' for coef, i in enumerate(self.polyA[: self.len_A] ) )
lowerCamelCase_ = 'B = ' + ' + '.join(
F'''{coef}*x^{i}''' for coef, i in enumerate(self.polyB[: self.len_B] ) )
lowerCamelCase_ = 'A*B = ' + ' + '.join(
F'''{coef}*x^{i}''' for coef, i in enumerate(self.product ) )
return F'''{a}\n{b}\n{c}'''
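# Minimal sanity check, assuming coefficients are listed lowest degree first:
# a([1, 2], [3, 4]).product should correspond to
# (1 + 2x) * (3 + 4x) = 3 + 10x + 8x^2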
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 183 |
"""simple docstring"""
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
_SCREAMING_SNAKE_CASE : Union[str, Any] = ['''\nclass''', '''\ndef''', '''\n#''', '''\n@''', '''\nprint''', '''\nif''']
class a ( __snake_case ):
def __init__( self : Dict , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any=None , __SCREAMING_SNAKE_CASE : Tuple=1 ) -> str:
lowerCamelCase_ = tokenizer
lowerCamelCase_ = dataset
lowerCamelCase_ = len(__SCREAMING_SNAKE_CASE ) if n_tasks is None else n_tasks
lowerCamelCase_ = n_copies
def __iter__( self : Dict ) -> Any:
lowerCamelCase_ = []
for task in range(self.n_tasks ):
# without strip, the model generates commented code ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]['prompt'].strip() )
lowerCamelCase_ = self.tokenizer(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , return_tensors='pt' )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class a ( __snake_case ):
def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Any ) -> List[Any]:
lowerCamelCase_ = start_length
lowerCamelCase_ = eof_strings
lowerCamelCase_ = tokenizer
def __call__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Dict , **__SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Optional[Any]:
lowerCamelCase_ = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
lowerCamelCase_ = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( _lowerCamelCase : List[Any] ) -> Tuple:
lowerCamelCase_ = re.split('(%s)' % '|'.join(_lowerCamelCase ) , _lowerCamelCase )
# last string should be ""
return "".join(string_list[:-2] )
def lowerCamelCase__ ( _lowerCamelCase : str , _lowerCamelCase : str , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : str , _lowerCamelCase : Dict=20 , **_lowerCamelCase : Dict ) -> List[str]:
lowerCamelCase_ = defaultdict(_lowerCamelCase ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(_lowerCamelCase ) ):
with torch.no_grad():
lowerCamelCase_ = batch['ids'].shape[-1]
lowerCamelCase_ = accelerator.unwrap_model(_lowerCamelCase ).generate(
input_ids=batch['ids'][:, : batch['input_len']] , num_return_sequences=_lowerCamelCase , **_lowerCamelCase )
# each task is generated batch_size times
lowerCamelCase_ = batch['task_id'].repeat(_lowerCamelCase )
lowerCamelCase_ = accelerator.pad_across_processes(
_lowerCamelCase , dim=1 , pad_index=tokenizer.pad_token_id )
lowerCamelCase_ , lowerCamelCase_ = accelerator.gather((generated_tokens, generated_tasks) )
lowerCamelCase_ = generated_tokens.cpu().numpy()
lowerCamelCase_ = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(_lowerCamelCase , _lowerCamelCase ):
gen_token_dict[task].append(_lowerCamelCase )
lowerCamelCase_ = [[] for _ in range(_lowerCamelCase )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
lowerCamelCase_ = tokenizer.decode(_lowerCamelCase , skip_special_tokens=_lowerCamelCase , clean_up_tokenization_spaces=_lowerCamelCase )
code_gens[task].append(remove_last_block(_lowerCamelCase ) )
return code_gens

def main():
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments)  # the arguments dataclass is defined earlier in this script
    args = parser.parse_args()

    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"
    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()

    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)

    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)

    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }

    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval")
    code_eval_metric = load_metric("code_eval")

    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"])
    n_copies = args.n_samples // args.batch_size

    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["test"], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)

    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""], predictions=[[""]])
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
            " flag to enable code evaluation."
        )
        raise exception

    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)

    code_gens = complete_code(
        accelerator,
        model,
        tokenizer,
        human_eval_loader,
        n_tasks=n_tasks,
        batch_size=args.batch_size,
        **gen_kwargs,
    )

    if accelerator.is_main_process:
        references = []
        for task in tqdm(range(n_tasks)):
            test_func = human_eval["test"][task]["test"]
            entry_point = f"check({human_eval['test'][task]['entry_point']})"
            references.append("\n" + test_func + "\n" + entry_point)

        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=code_gens, num_workers=args.num_workers
        )
        print(f"Results: {pass_at_k}")

        # Save results to json file
        with open(args.output_file, "w") as fp:
            json.dump(pass_at_k, fp)


# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
    main()
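
# A launch sketch (the script filename and checkpoint are illustrative; the flag
# names mirror the HumanEvalArguments fields referenced above):
#
#   accelerate launch human_eval.py \
#       --model_ckpt codeparrot/codeparrot \
#       --n_samples 200 \
#       --batch_size 20 \
#       --HF_ALLOW_CODE_EVAL 1 \
#       --output_file eval_results.json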
| 183 | 1 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
torch.backends.cuda.matmul.allow_tf32 = False

class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass


@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 361 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends

class DPMSolverSDEScheduler(metaclass=DummyObject):
    _backends = ["torch", "torchsde"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "torchsde"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])
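
# How this dummy-object pattern behaves in practice (a sketch; the error text is
# paraphrased): importing the placeholder class always succeeds, but any attempt
# to construct or call it fails loudly, so the missing backends are reported at
# use time rather than as an opaque import-time failure.
#
#   from diffusers.utils.dummy_torch_and_torchsde_objects import DPMSolverSDEScheduler
#   DPMSolverSDEScheduler()  # raises ImportError naming torch and torchsde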
| 272 | 0 |
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def get_swin_config(swin_name):
    config = SwinConfig()
    name_split = swin_name.split("_")

    model_size = name_split[1]
    img_size = int(name_split[4])
    window_size = int(name_split[3][-1])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "in22k" in swin_name:
        num_classes = 21841
    else:
        num_classes = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config
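
# Worked example of the name parsing above: "swin_base_patch4_window7_224_in22k"
# splits into ["swin", "base", "patch4", "window7", "224", "in22k"], giving
# model_size "base" (embed_dim 128, depths (2, 2, 18, 2), heads (4, 8, 16, 32)),
# window_size 7, img_size 224, and num_classes 21841 because the name has "in22k".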

def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swin." + name

    return name
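
# Example mapping produced by rename_key (derived from the rules above):
#   "layers.0.blocks.0.attn.proj.weight"
#   -> "swin.encoder.layers.0.blocks.0.attention.output.dense.weight"
# "layers" adds the "encoder." prefix, "attn.proj" is rewritten before the bare
# "attn" rule can fire, and the final else-branch prepends "swin.".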

def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
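
# The qkv handling above relies on timm storing the attention projections fused:
# the weight has shape (3 * dim, dim), so rows [0:dim] hold the query projection,
# [dim:2*dim] the key, and [-dim:] the value; the biases split the same way.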

def convert_swin_checkpoint(swin_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swin_name, pretrained=True)
    timm_model.eval()

    config = get_swin_config(swin_name)
    model = SwinForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swin_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    # sanity check: the converted model must reproduce the timm logits
    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swin_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
snake_case : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swin_name''',
default='''swin_tiny_patch4_window7_224''',
type=str,
help='''Name of the Swin timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
snake_case : Dict = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
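
# Invocation sketch (the filename is illustrative; the flags match the parser above):
#
#   python convert_swin_timm_to_pytorch.py \
#       --swin_name swin_tiny_patch4_window7_224 \
#       --pytorch_dump_folder_path ./swin-tiny-patch4-window7-224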
| 94 |
"""simple docstring"""
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)

def preprocess(image, w, h):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
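
# Pixel mapping used above: x / 255 puts values in [0, 1] and 2x - 1 rescales to
# [-1, 1], the range the VAE encoder expects here; e.g. 0 -> -1.0, 127.5 -> 0.0,
# 255 -> 1.0.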

def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    inputs_are_torch = False  # initialize so numpy inputs do not hit a NameError below
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()

    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1

    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)

    return v2
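
# Spherical interpolation computed above, with theta = arccos of the normalized
# dot product:
#   slerp(t, v0, v1) = sin((1 - t) * theta) / sin(theta) * v0
#                      + sin(t * theta) / sin(theta) * v1
# When the vectors are nearly parallel (|dot| > DOT_THRESHOLD) sin(theta) is close
# to zero, so the code falls back to plain linear interpolation for stability.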

def spherical_dist_loss(x, y):
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
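
# For unit vectors separated by angle theta, ||x - y|| = 2 * sin(theta / 2), so
# the expression above evaluates to 2 * (theta / 2) ** 2 = theta ** 2 / 2: a
# squared geodesic distance on the unit sphere, smooth near theta = 0.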

def set_requires_grad(model, value):
    for param in model.parameters():
        param.requires_grad = value

class CLIPGuidedImagesMixingStableDiffusion(DiffusionPipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        clip_model: CLIPModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler],
        feature_extractor: CLIPFeatureExtractor,
        coca_model=None,
        coca_tokenizer=None,
        coca_transform=None,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            clip_model=clip_model,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
            coca_model=coca_model,
            coca_tokenizer=coca_tokenizer,
            coca_transform=coca_transform,
        )
        self.feature_extractor_size = (
            feature_extractor.size
            if isinstance(feature_extractor.size, int)
            else feature_extractor.size["shortest_edge"]
        )
        self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
        set_requires_grad(self.text_encoder, False)
        set_requires_grad(self.clip_model, False)

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    def freeze_vae(self):
        set_requires_grad(self.vae, False)

    def unfreeze_vae(self):
        set_requires_grad(self.vae, True)

    def freeze_unet(self):
        set_requires_grad(self.unet, False)

    def unfreeze_unet(self):
        set_requires_grad(self.unet, True)

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start
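
    # Example with the defaults used below: num_inference_steps = 50 and
    # strength = 0.6 give init_timestep = 30 and t_start = 20, so only the final
    # 30 scheduler timesteps are run -- more strength means more added noise and
    # more denoising steps.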

    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, torch.Tensor):
            raise ValueError(f"`image` has to be of type `torch.Tensor` but is {type(image)}")

        image = image.to(device=device, dtype=dtype)

        if isinstance(generator, list):
            init_latents = [
                self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
            ]
            init_latents = torch.cat(init_latents, dim=0)
        else:
            init_latents = self.vae.encode(image).latent_dist.sample(generator)

        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
        init_latents = 0.18215 * init_latents
        init_latents = init_latents.repeat_interleave(batch_size, dim=0)

        noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype)

        # add noise to the encoded latents at the given timestep
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents

    def get_image_description(self, image):
        transformed_image = self.coca_transform(image).unsqueeze(0)
        with torch.no_grad(), torch.cuda.amp.autocast():
            generated = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype))
        generated = self.coca_tokenizer.decode(generated[0].cpu().numpy())
        return generated.split("<end_of_text>")[0].replace("<start_of_text>", "").rstrip(" .,")

    def get_clip_image_embeddings(self, image, batch_size):
        clip_image_input = self.feature_extractor.preprocess(image)
        clip_image_features = torch.from_numpy(clip_image_input["pixel_values"][0]).unsqueeze(0).to(self.device).half()
        image_embeddings_clip = self.clip_model.get_image_features(clip_image_features)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
        image_embeddings_clip = image_embeddings_clip.repeat_interleave(batch_size, dim=0)
        return image_embeddings_clip

    @torch.enable_grad()
    def cond_fn(
        self,
        latents,
        timestep,
        index,
        text_embeddings,
        noise_pred_original,
        original_image_embeddings_clip,
        clip_guidance_scale,
    ):
        latents = latents.detach().requires_grad_()

        latent_model_input = self.scheduler.scale_model_input(latents, timestep)

        # predict the noise residual
        noise_pred = self.unet(latent_model_input, timestep, encoder_hidden_states=text_embeddings).sample

        if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
            alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
            beta_prod_t = 1 - alpha_prod_t
            # compute predicted original sample from predicted noise, also called
            # "predicted x_0" in formula (12) of https://arxiv.org/pdf/2010.02502.pdf
            pred_original_sample = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5

            fac = torch.sqrt(beta_prod_t)
            sample = pred_original_sample * (fac) + latents * (1 - fac)
        elif isinstance(self.scheduler, LMSDiscreteScheduler):
            sigma = self.scheduler.sigmas[index]
            sample = latents - sigma * noise_pred
        else:
            raise ValueError(f"scheduler type {type(self.scheduler)} not supported")

        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
        sample = 1 / 0.18215 * sample
        image = self.vae.decode(sample).sample
        image = (image / 2 + 0.5).clamp(0, 1)

        image = transforms.Resize(self.feature_extractor_size)(image)
        image = self.normalize(image).to(latents.dtype)

        image_embeddings_clip = self.clip_model.get_image_features(image)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)

        loss = spherical_dist_loss(image_embeddings_clip, original_image_embeddings_clip).mean() * clip_guidance_scale

        grads = -torch.autograd.grad(loss, latents)[0]

        if isinstance(self.scheduler, LMSDiscreteScheduler):
            latents = latents.detach() + grads * (sigma**2)
            noise_pred = noise_pred_original
        else:
            noise_pred = noise_pred_original - torch.sqrt(beta_prod_t) * grads
        return noise_pred, latents
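
    # Summary of the guidance step above: the latents are decoded to an image
    # estimate, embedded with CLIP, and the spherical distance to the target
    # embedding is differentiated w.r.t. the latents; the (negated) gradient is
    # then folded into the noise prediction, scaled by sqrt(1 - alpha_prod_t)
    # for DDIM-style schedulers or applied as grads * sigma**2 for
    # LMSDiscreteScheduler.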

    @torch.no_grad()
    def __call__(
        self,
        style_image: Union[torch.FloatTensor, PIL.Image.Image],
        content_image: Union[torch.FloatTensor, PIL.Image.Image],
        style_prompt: Optional[str] = None,
        content_prompt: Optional[str] = None,
        height: Optional[int] = 512,
        width: Optional[int] = 512,
        noise_strength: float = 0.6,
        num_inference_steps: Optional[int] = 50,
        guidance_scale: Optional[float] = 7.5,
        batch_size: Optional[int] = 1,
        eta: float = 0.0,
        clip_guidance_scale: Optional[float] = 100,
        generator: Optional[torch.Generator] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        slerp_latent_style_strength: float = 0.8,
        slerp_prompt_style_strength: float = 0.1,
        slerp_clip_image_style_strength: float = 0.1,
    ):
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(f"You have passed {batch_size} batch_size, but only {len(generator)} generators.")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if isinstance(generator, torch.Generator) and batch_size > 1:
            generator = [generator] + [None] * (batch_size - 1)

        coca_is_none = [
            ("model", self.coca_model is None),
            ("tokenizer", self.coca_tokenizer is None),
            ("transform", self.coca_transform is None),
        ]
        coca_is_none = [x[0] for x in coca_is_none if x[1]]
        coca_is_none_str = ", ".join(coca_is_none)
        # generate prompts with coca model if prompt is None
        if content_prompt is None:
            if len(coca_is_none):
                raise ValueError(
                    f"Content prompt is None and CoCa [{coca_is_none_str}] is None."
                    f"Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline."
                )
            content_prompt = self.get_image_description(content_image)
        if style_prompt is None:
            if len(coca_is_none):
                raise ValueError(
                    f"Style prompt is None and CoCa [{coca_is_none_str}] is None."
                    f" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline."
                )
            style_prompt = self.get_image_description(style_image)

        # get prompt text embeddings for content and style
        content_text_input = self.tokenizer(
            content_prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        content_text_embeddings = self.text_encoder(content_text_input.input_ids.to(self.device))[0]

        style_text_input = self.tokenizer(
            style_prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        style_text_embeddings = self.text_encoder(style_text_input.input_ids.to(self.device))[0]

        text_embeddings = slerp(slerp_prompt_style_strength, content_text_embeddings, style_text_embeddings)

        # duplicate text embeddings for each generation per prompt
        text_embeddings = text_embeddings.repeat_interleave(batch_size, dim=0)

        # set timesteps
        accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
        extra_set_kwargs = {}
        if accepts_offset:
            extra_set_kwargs["offset"] = 1

        self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        self.scheduler.timesteps.to(self.device)

        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, noise_strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # Preprocess image
        preprocessed_content_image = preprocess(content_image, width, height)
        content_latents = self.prepare_latents(
            preprocessed_content_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator
        )

        preprocessed_style_image = preprocess(style_image, width, height)
        style_latents = self.prepare_latents(
            preprocessed_style_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator
        )

        latents = slerp(slerp_latent_style_strength, content_latents, style_latents)

        if clip_guidance_scale > 0:
            content_clip_image_embedding = self.get_clip_image_embeddings(content_image, batch_size)
            style_clip_image_embedding = self.get_clip_image_embeddings(style_image, batch_size)
            clip_image_embeddings = slerp(
                slerp_clip_image_style_strength, content_clip_image_embedding, style_clip_image_embedding
            )

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            max_length = content_text_input.input_ids.shape[-1]
            uncond_input = self.tokenizer([""], padding="max_length", max_length=max_length, return_tensors="pt")
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
            # duplicate unconditional embeddings for each generation per prompt
            uncond_embeddings = uncond_embeddings.repeat_interleave(batch_size, dim=0)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not work reproducibly on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator

        with self.progress_bar(total=num_inference_steps):
            for i, t in enumerate(timesteps):
                # expand the latents if we are doing classifier free guidance
                latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

                # predict the noise residual
                noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

                # perform classifier free guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

                # perform clip guidance
                if clip_guidance_scale > 0:
                    text_embeddings_for_guidance = (
                        text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings
                    )
                    noise_pred, latents = self.cond_fn(
                        latents,
                        t,
                        i,
                        text_embeddings_for_guidance,
                        noise_pred,
                        clip_image_embeddings,
                        clip_guidance_scale,
                    )

                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, None)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
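
# A usage sketch (class and argument names as defined above; the component
# checkpoints are illustrative and must be supplied by the caller):
#
#   pipe = CLIPGuidedImagesMixingStableDiffusion(
#       vae=vae, text_encoder=text_encoder, clip_model=clip_model,
#       tokenizer=tokenizer, unet=unet, scheduler=scheduler,
#       feature_extractor=feature_extractor,
#   )
#   out = pipe(
#       style_image=style_img, content_image=content_img,
#       style_prompt="an oil painting", content_prompt="a city skyline",
#       slerp_latent_style_strength=0.8,
#   )
#   out.images[0].save("mixed.png")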
| 194 | 0 |
"""simple docstring"""
import copy
import os
import cv2
import numpy as np
from matplotlib import pyplot as plt

class ConstantStretch:
    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image):
        self.img = cv2.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                # kept from the original: self.rem starts at 0, so this branch
                # never fires and the rounding below effectively floors
                self.rem = int(last % last)
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
            self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
            self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg", self.img)
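
    # The lookup table built above is standard histogram equalization: level k is
    # mapped to s_k = round((L - 1) * sum_{j <= k} n_j / n), where n_j counts the
    # pixels at intensity j and n is the total pixel count, so the cumulative
    # histogram of the output image is approximately linear.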

    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cv2.imshow("Output-Image", self.img)
        cv2.imshow("Input-Image", self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()


if __name__ == "__main__":
    # use the directory of this file, not its basename, to locate the input image
    file_path = os.path.join(os.path.dirname(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
| 230 |
"""simple docstring"""
fast27_timesteps = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
smart27_timesteps = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
smart50_timesteps = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
smart100_timesteps = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
smart185_timesteps = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
super27_timesteps = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
super40_timesteps = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
super100_timesteps = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
| 230 | 1 |