"""simple docstring"""
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class SCREAMING_SNAKE_CASE_ ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__lowercase : Any = (DPMSolverSDEScheduler,)
__lowercase : Tuple = 10
def snake_case_ ( self , **lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = {
"num_train_timesteps": 1_1_0_0,
"beta_start": 0.00_01,
"beta_end": 0.02,
"beta_schedule": "linear",
"noise_sampler_seed": 0,
}
config.update(**_snake_case)
return config
def snake_case_ ( self):
for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=_snake_case)
def snake_case_ ( self):
for beta_start, beta_end in zip([0.0_00_01, 0.00_01, 0.0_01] , [0.00_02, 0.0_02, 0.02]):
self.check_over_configs(beta_start=_snake_case , beta_end=_snake_case)
def snake_case_ ( self):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=_snake_case)
def snake_case_ ( self):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_snake_case)
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
__SCREAMING_SNAKE_CASE = self.get_scheduler_config()
__SCREAMING_SNAKE_CASE = scheduler_class(**_snake_case)
scheduler.set_timesteps(self.num_inference_steps)
__SCREAMING_SNAKE_CASE = self.dummy_model()
__SCREAMING_SNAKE_CASE = self.dummy_sample_deter * scheduler.init_noise_sigma
__SCREAMING_SNAKE_CASE = sample.to(_snake_case)
for i, t in enumerate(scheduler.timesteps):
__SCREAMING_SNAKE_CASE = scheduler.scale_model_input(_snake_case , _snake_case)
__SCREAMING_SNAKE_CASE = model(_snake_case , _snake_case)
__SCREAMING_SNAKE_CASE = scheduler.step(_snake_case , _snake_case , _snake_case)
__SCREAMING_SNAKE_CASE = output.prev_sample
__SCREAMING_SNAKE_CASE = torch.sum(torch.abs(_snake_case))
__SCREAMING_SNAKE_CASE = torch.mean(torch.abs(_snake_case))
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_67.47_82_10_44_92_18_75) < 1E-2
assert abs(result_mean.item() - 0.21_78_70_59_64_56_52_77) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_71.59_35_21_11_81_64_06) < 1E-2
assert abs(result_mean.item() - 0.2_23_42_90_68_92_29_96_52) < 1E-3
else:
assert abs(result_sum.item() - 1_62.52_38_34_22_85_15_62) < 1E-2
assert abs(result_mean.item() - 0.2_11_61_95_70_85_13_26) < 1E-3
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
__SCREAMING_SNAKE_CASE = self.get_scheduler_config(prediction_type="""v_prediction""")
__SCREAMING_SNAKE_CASE = scheduler_class(**_snake_case)
scheduler.set_timesteps(self.num_inference_steps)
__SCREAMING_SNAKE_CASE = self.dummy_model()
__SCREAMING_SNAKE_CASE = self.dummy_sample_deter * scheduler.init_noise_sigma
__SCREAMING_SNAKE_CASE = sample.to(_snake_case)
for i, t in enumerate(scheduler.timesteps):
__SCREAMING_SNAKE_CASE = scheduler.scale_model_input(_snake_case , _snake_case)
__SCREAMING_SNAKE_CASE = model(_snake_case , _snake_case)
__SCREAMING_SNAKE_CASE = scheduler.step(_snake_case , _snake_case , _snake_case)
__SCREAMING_SNAKE_CASE = output.prev_sample
__SCREAMING_SNAKE_CASE = torch.sum(torch.abs(_snake_case))
__SCREAMING_SNAKE_CASE = torch.mean(torch.abs(_snake_case))
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_24.77_14_92_00_43_94_53) < 1E-2
assert abs(result_mean.item() - 0.1_62_26_28_90_14_81_62_84) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_28.1_66_33_60_59_57_03) < 1E-2
assert abs(result_mean.item() - 0.1_66_88_32_60_01_16_72_97) < 1E-3
else:
assert abs(result_sum.item() - 1_19.8_48_75_48_82_81_25) < 1E-2
assert abs(result_mean.item() - 0.15_60_53_06_62_53_66_21) < 1E-3
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
__SCREAMING_SNAKE_CASE = self.get_scheduler_config()
__SCREAMING_SNAKE_CASE = scheduler_class(**_snake_case)
scheduler.set_timesteps(self.num_inference_steps , device=_snake_case)
__SCREAMING_SNAKE_CASE = self.dummy_model()
__SCREAMING_SNAKE_CASE = self.dummy_sample_deter.to(_snake_case) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
__SCREAMING_SNAKE_CASE = scheduler.scale_model_input(_snake_case , _snake_case)
__SCREAMING_SNAKE_CASE = model(_snake_case , _snake_case)
__SCREAMING_SNAKE_CASE = scheduler.step(_snake_case , _snake_case , _snake_case)
__SCREAMING_SNAKE_CASE = output.prev_sample
__SCREAMING_SNAKE_CASE = torch.sum(torch.abs(_snake_case))
__SCREAMING_SNAKE_CASE = torch.mean(torch.abs(_snake_case))
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_67.46_95_73_97_46_09_38) < 1E-2
assert abs(result_mean.item() - 0.2_18_05_93_46_07_98_26_35) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_71.59_35_36_37_69_53_12) < 1E-2
assert abs(result_mean.item() - 0.2_23_42_90_83_82_41_57_71) < 1E-3
else:
assert abs(result_sum.item() - 1_62.52_38_34_22_85_15_62) < 1E-2
assert abs(result_mean.item() - 0.2_11_61_95_70_85_13_26) < 1E-3
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
__SCREAMING_SNAKE_CASE = self.get_scheduler_config()
__SCREAMING_SNAKE_CASE = scheduler_class(**_snake_case , use_karras_sigmas=_snake_case)
scheduler.set_timesteps(self.num_inference_steps , device=_snake_case)
__SCREAMING_SNAKE_CASE = self.dummy_model()
__SCREAMING_SNAKE_CASE = self.dummy_sample_deter.to(_snake_case) * scheduler.init_noise_sigma
__SCREAMING_SNAKE_CASE = sample.to(_snake_case)
for t in scheduler.timesteps:
__SCREAMING_SNAKE_CASE = scheduler.scale_model_input(_snake_case , _snake_case)
__SCREAMING_SNAKE_CASE = model(_snake_case , _snake_case)
__SCREAMING_SNAKE_CASE = scheduler.step(_snake_case , _snake_case , _snake_case)
__SCREAMING_SNAKE_CASE = output.prev_sample
__SCREAMING_SNAKE_CASE = torch.sum(torch.abs(_snake_case))
__SCREAMING_SNAKE_CASE = torch.mean(torch.abs(_snake_case))
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_76.66_97_41_35_74_21_88) < 1E-2
assert abs(result_mean.item() - 0.2_30_03_87_27_30_98_18_11) < 1E-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_77.63_65_35_64_45_31_25) < 1E-2
assert abs(result_mean.item() - 0.2_30_03_87_27_30_98_18_11) < 1E-2
else:
assert abs(result_sum.item() - 1_70.3_13_52_23_38_86_72) < 1E-2
assert abs(result_mean.item() - 0.2_30_03_87_27_30_98_18_11) < 1E-2
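# --- How to run this test file (illustrative; the path is an assumption about
# the usual diffusers repo layout for scheduler tests) ---
#   pytest tests/schedulers/test_scheduler_dpm_sde.py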
from ..utils import DummyObject, requires_backends


# NOTE: the original class names were lost in the identifier scrambling; the two
# names below are placeholders for dummy objects guarding the "speech" backend.
class SpeechDummyObject1(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])


class SpeechDummyObject2(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])
from maths.prime_check import is_prime


def twin_prime(number: int) -> int:
    """Return number + 2 if number and number + 2 form a twin-prime pair, else -1."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
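# --- Illustrative usage (not part of the original module; assumes this file
# lives in the repo so that `maths.prime_check` is importable) ---
# 5 and 7 are twin primes; 6 is not prime.
if __name__ == "__main__":
    print(twin_prime(5))  # 7
    print(twin_prime(6))  # -1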
def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> tuple[float, float]:
    """
    Solve the system a1*x + b1*y = c1, a2*x + b2*y = c2, given as
    [a1, b1, c1] and [a2, b2, c2], using Cramer's rule.
    """
    if not len(equation1) == len(equation2) == 3:
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        else:
            raise ValueError("No solution. (Inconsistent system)")
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-trivial solution (consistent system)
            return (x, y)
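# --- Illustrative usage (not in the original file) ---
# Solves x + y = 3 and x - y = 1; by hand, x = 2 and y = 1.
if __name__ == "__main__":
    print(cramers_rule_2x2([1, 1, 3], [1, -1, 1]))  # (2.0, 1.0)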
import logging

import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed


class ConstantLengthDataset(IterableDataset):
    def __init__(self, tokenizer, dataset, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences

    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)["content"])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer, truncation=False)["input_ids"]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)


def create_dataloader(args):
    ds_kwargs = {"streaming": True}
    valid_data = load_dataset(args.dataset_name, split="train", **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, valid_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader


def evaluate(args):
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float("inf")
    return loss.item(), perplexity.item()


# Setup Accelerator
accelerator = Accelerator()

# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)

# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)

# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)

# Load dataset and dataloader
eval_dataloader = create_dataloader(args)

# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)

# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
eval_loss, perplexity = evaluate(args)
logger.info(f"loss/eval: {eval_loss}, perplexity: {perplexity}")
from statistics import mean, stdev


def normalization(data: list, ndigits: int = 3) -> list:
    """Rescale data to the range [0, 1], rounded to ndigits."""
    x_min = min(data)
    x_max = max(data)
    # normalize data
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]


def standardization(data: list, ndigits: int = 3) -> list:
    """Rescale data to zero mean and unit (sample) standard deviation."""
    mu = mean(data)
    sigma = stdev(data)
    # standardize data
    return [round((x - mu) / (sigma), ndigits) for x in data]
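# --- Illustrative usage (not in the original file) ---
if __name__ == "__main__":
    print(normalization([2, 4, 6]))    # [0.0, 0.5, 1.0]
    print(standardization([2, 4, 6]))  # [-1.0, 0.0, 1.0]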
from __future__ import annotations

from collections import Counter
from random import random


class MarkovChainGraphUndirectedUnweighted:
    """Undirected, unweighted graph for running a Markov chain simulation."""

    def __init__(self):
        self.connections = {}

    def add_node(self, node: str) -> None:
        self.connections[node] = {}

    def add_transition_probability(self, node1: str, node2: str, probability: float) -> None:
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability

    def get_nodes(self) -> list[str]:
        return list(self.connections)

    def transition(self, node: str) -> str:
        current_probability = 0
        random_value = random()

        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""


def get_transitions(start: str, transitions: list[tuple[str, str, float]], steps: int) -> dict[str, int]:
    """Run the chain for `steps` transitions and count visits per node."""
    graph = MarkovChainGraphUndirectedUnweighted()

    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)

    visited = Counter(graph.get_nodes())
    node = start

    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1

    return visited


if __name__ == "__main__":
    import doctest

    doctest.testmod()
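# --- Illustrative usage (not in the original file) ---
# A two-state chain; visit counts over 1000 steps should roughly follow
# the chain's stationary distribution.
if __name__ == "__main__":
    transitions = [
        ("a", "a", 0.9),
        ("a", "b", 0.1),
        ("b", "a", 0.5),
        ("b", "b", 0.5),
    ]
    print(get_transitions("a", transitions, 1000))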
import argparse
import os

# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType

# New Code #
from accelerate.utils import find_executable_batch_size


########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
#   - single CPU or single GPU
#   - multi GPUS (using PyTorch distributed mode)
#   - (multi) TPUs
#   - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################


MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)

        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs),
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
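# --- Illustrative launch commands (only --mixed_precision and --cpu are defined
# above; the script filename is an assumption) ---
#   accelerate launch memory.py --mixed_precision fp16
#   python memory.py --cpu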
def encrypt(input_string: str, key: int) -> str:
    """Rail-fence encrypt: write the string in a zigzag over `key` rows, read row by row."""
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string

    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)

    return output_string


def decrypt(input_string: str, key: int) -> str:
    """Rail-fence decrypt: rebuild the zigzag grid and read it back in zigzag order."""
    grid = []
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string

    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")

    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)

    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string


def bruteforce(input_string: str) -> dict[int, str]:
    """Use decrypt with every possible key and return all candidate plaintexts."""
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results


if __name__ == "__main__":
    import doctest

    doctest.testmod()
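# --- Illustrative round trip (not in the original file) ---
if __name__ == "__main__":
    secret = encrypt("WEAREDISCOVERED", 3)
    assert decrypt(secret, 3) == "WEAREDISCOVERED"
    print(secret)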
from __future__ import annotations


def prime_factors(n: int) -> list[int]:
    """Return the prime factors of n in ascending order (with multiplicity)."""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors


if __name__ == "__main__":
    import doctest

    doctest.testmod()
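# --- Illustrative usage (not in the original file): 360 = 2^3 * 3^2 * 5 ---
if __name__ == "__main__":
    print(prime_factors(360))  # [2, 2, 2, 3, 3, 5]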
import json
import os
import unittest

from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer

from ...test_tokenization_common import TokenizerTesterMixin


class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
from typing import Dict, List

from nltk.translate import gleu_score

import datasets
from datasets import MetricInfo


_CITATION = """\
@misc{wu2016googles,
title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
_lowerCamelCase = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""
_lowerCamelCase = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
'google_bleu': google_bleu score
Examples:
Example 1:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.44
Example 2:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.61
Example 3:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results[\"google_bleu\"], 2))
0.53
Example 4:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results[\"google_bleu\"], 2))
0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(self, predictions, references, min_len=1, max_len=4):
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
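# --- Illustrative usage sketch (not part of the original metric file) ---
# Scoring an identical tokenized hypothesis/reference pair: every n-gram
# matches, so GLEU should come out as 1.0. Direct instantiation of the class
# is an assumption; loading via `datasets.load_metric("google_bleu")`, as in
# the docstring examples above, works the same way.
if __name__ == "__main__":
    hyp = ["the cat sat on the mat".split()]
    refs = [["the cat sat on the mat".split()]]
    print(GoogleBleu().compute(predictions=hyp, references=refs))  # {'google_bleu': 1.0}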
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean


def pi_estimator(iterations: int):
    """Estimate pi by sampling random points in the unit square."""

    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0))) for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The known value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    """Monte Carlo estimate of the integral of function_to_integrate on [min_value, max_value]."""
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(iterations: int, min_value: float = 0.0, max_value: float = 1.0) -> None:
    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(iterations, identity_function, min_value, max_value)
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(iterations, function_to_integrate, 0.0, 2.0)

    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
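# --- Illustrative run (not in the original file; results vary per run) ---
if __name__ == "__main__":
    pi_estimator(10_000)                       # crude pi estimate from dart throws
    area_under_line_estimator_check(10_000)    # integral of y = x on [0, 1] ~ 0.5
    pi_estimator_using_area_under_curve(10_000)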
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional

from seq2seq_trainer import Seq2SeqTrainer
from seq2seq_training_args import Seq2SeqTrainingArguments

import transformers
from transformers import (
    AutoConfig,
    AutoModelForSeq2SeqLM,
    AutoTokenizer,
    HfArgumentParser,
    MBartTokenizer,
    MBartTokenizerFast,
    set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
    Seq2SeqDataCollator,
    Seq2SeqDataset,
    assert_all_frozen,
    build_compute_metrics_fn,
    check_output_dir,
    freeze_embeds,
    freeze_params,
    lmap,
    save_json,
    use_task_specific_params,
    write_txt_file,
)


logger = logging.getLogger(__name__)


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether to freeze the encoder."})
    freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."})


@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    task: Optional[str] = field(
        default="summarization",
        metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"},
    )
    max_source_length: Optional[int] = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    max_target_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    val_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        },
    )
    test_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."})
    n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."})
    n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."})
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."})
    tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."})
    eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."})
    ignore_pad_token_for_loss: bool = field(
        default=True,
        metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."},
    )


def handle_metrics(split, metrics, output_dir):
    """Log and save metrics for the given split."""
    logger.info(f"***** {split} metrics *****")
    for key in sorted(metrics.keys()):
        logger.info(f"  {key} = {metrics[key]}")
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))


def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))

    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    check_output_dir(training_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED),
        training_args.fp16,
    )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
    for p in extra_model_params:
        if getattr(training_args, p, None):
            assert hasattr(config, p), f"({config.__class__.__name__}) doesn't have a `{p}` attribute"
            setattr(config, p, getattr(training_args, p))

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_args.model_name_or_path,
        from_tf=".ckpt" in model_args.model_name_or_path,
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # use task specific params
    use_task_specific_params(model, data_args.task)

    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams

    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer, MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang)

    if model_args.freeze_embeds:
        freeze_embeds(model)
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder())
        assert_all_frozen(model.get_encoder())

    dataset_class = Seq2SeqDataset

    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer,
            type_path="train",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_train,
            max_target_length=data_args.max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer,
            type_path="val",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_val,
            max_target_length=data_args.val_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer,
            type_path="test",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_test,
            max_target_length=data_args.test_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_predict
        else None
    )

    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task, tokenizer) if training_args.predict_with_generate else None
    )
    trainer = Seq2SeqTrainer(
        model=model,
        args=training_args,
        data_args=data_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        data_collator=Seq2SeqDataCollator(
            tokenizer, data_args, model.config.decoder_start_token_id, training_args.tpu_num_cores
        ),
        compute_metrics=compute_metrics_fn,
        tokenizer=tokenizer,
    )

    all_metrics = {}
    # Training
    if training_args.do_train:
        logger.info("*** Train ***")

        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train

        trainer.save_model()  # this also saves the tokenizer

        if trainer.is_world_process_zero():
            handle_metrics("train", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate(metric_key_prefix="val")
        metrics["val_n_objs"] = data_args.n_val
        metrics["val_loss"] = round(metrics["val_loss"], 4)

        if trainer.is_world_process_zero():
            handle_metrics("val", metrics, training_args.output_dir)
            all_metrics.update(metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")

        test_output = trainer.predict(test_dataset=test_dataset, metric_key_prefix="test")
        metrics = test_output.metrics
        metrics["test_n_objs"] = data_args.n_test

        if trainer.is_world_process_zero():
            metrics["test_loss"] = round(metrics["test_loss"], 4)
            handle_metrics("test", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True
                )
                test_preds = lmap(str.strip, test_preds)
                write_txt_file(test_preds, os.path.join(training_args.output_dir, "test_generations.txt"))

    if trainer.is_world_process_zero():
        save_json(all_metrics, os.path.join(training_args.output_dir, "all_results.json"))

    return all_metrics


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
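# --- Illustrative invocation (flag names follow the dataclass fields above plus
# the standard HF training arguments; the script filename is an assumption) ---
#   python finetune_trainer.py --model_name_or_path t5-small --data_dir <data_dir> \
#       --output_dir <output_dir> --do_train --do_eval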
from __future__ import annotations

from typing import Generic, TypeVar

T = TypeVar("T")


class DisjointSetTreeNode(Generic[T]):
    # Disjoint-set node storing the data, its parent, and its rank
    def __init__(self, data: T) -> None:
        self.data = data
        self.parent = self
        self.rank = 0


class DisjointSetTree(Generic[T]):
    # Disjoint-set (union-find) data structure
    def __init__(self) -> None:
        # map from node name to the node object
        self.map: dict[T, DisjointSetTreeNode[T]] = {}

    def make_set(self, data: T) -> None:
        # create a new set with data as its only member
        self.map[data] = DisjointSetTreeNode(data)

    def find_set(self, data: T) -> DisjointSetTreeNode[T]:
        # find the set data belongs to (with path compression)
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data)
        return elem_ref.parent

    def link(self, node1: DisjointSetTreeNode[T], node2: DisjointSetTreeNode[T]) -> None:
        # helper function for the union operation (union by rank)
        if node1.rank > node2.rank:
            node2.parent = node1
        else:
            node1.parent = node2
            if node1.rank == node2.rank:
                node2.rank += 1

    def union(self, data1: T, data2: T) -> None:
        # merge two disjoint sets
        self.link(self.find_set(data1), self.find_set(data2))


class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        # connections: map from a node to its neighbouring nodes (with weights)
        self.connections: dict[T, dict[T, int]] = {}

    def add_node(self, node: T) -> None:
        # add a node ONLY if it is not already present in the graph
        if node not in self.connections:
            self.connections[node] = {}

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # add an edge with the given weight
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight

    def kruskal(self) -> GraphUndirectedWeighted[T]:
        # Kruskal's algorithm to generate a Minimum Spanning Tree (MST) of the graph
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start))
                    edges.append((start, end, self.connections[start][end]))
        edges.sort(key=lambda x: x[2])

        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node)

        # MST generation
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u)
            parent_v = disjoint_set.find_set(v)
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u, v, w)
                disjoint_set.union(u, v)
        return graph
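# --- Illustrative usage (not in the original file) ---
# A triangle where the MST keeps the two cheapest edges: a-b (1) and b-c (2).
if __name__ == "__main__":
    g = GraphUndirectedWeighted[str]()
    g.add_edge("a", "b", 1)
    g.add_edge("b", "c", 2)
    g.add_edge("a", "c", 3)
    print(g.kruskal().connections)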
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class _snake_case :
__A : Dict =BlenderbotConfig
__A : Union[str, Any] ={}
__A : Any ="gelu"
def __init__( self ,_snake_case ,_snake_case=13 ,_snake_case=7 ,_snake_case=True ,_snake_case=False ,_snake_case=99 ,_snake_case=32 ,_snake_case=2 ,_snake_case=4 ,_snake_case=37 ,_snake_case=0.1 ,_snake_case=0.1 ,_snake_case=20 ,_snake_case=2 ,_snake_case=1 ,_snake_case=0 ,):
UpperCAmelCase_ : List[Any] = parent
UpperCAmelCase_ : str = batch_size
UpperCAmelCase_ : Dict = seq_length
UpperCAmelCase_ : int = is_training
UpperCAmelCase_ : Optional[Any] = use_labels
UpperCAmelCase_ : Any = vocab_size
UpperCAmelCase_ : Optional[int] = hidden_size
UpperCAmelCase_ : Optional[int] = num_hidden_layers
UpperCAmelCase_ : int = num_attention_heads
UpperCAmelCase_ : Tuple = intermediate_size
UpperCAmelCase_ : Any = hidden_dropout_prob
UpperCAmelCase_ : Optional[int] = attention_probs_dropout_prob
UpperCAmelCase_ : List[Any] = max_position_embeddings
UpperCAmelCase_ : str = eos_token_id
UpperCAmelCase_ : List[Any] = pad_token_id
UpperCAmelCase_ : List[Any] = bos_token_id
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Any = ids_tensor([self.batch_size, self.seq_length - 1] ,self.vocab_size )
UpperCAmelCase_ : int = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) ,1 )
UpperCAmelCase_ : Optional[Any] = tf.concat([input_ids, eos_tensor] ,axis=1 )
UpperCAmelCase_ : int = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
UpperCAmelCase_ : Optional[Any] = self.config_cls(
vocab_size=self.vocab_size ,d_model=self.hidden_size ,encoder_layers=self.num_hidden_layers ,decoder_layers=self.num_hidden_layers ,encoder_attention_heads=self.num_attention_heads ,decoder_attention_heads=self.num_attention_heads ,encoder_ffn_dim=self.intermediate_size ,decoder_ffn_dim=self.intermediate_size ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,eos_token_ids=[2] ,bos_token_id=self.bos_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.pad_token_id ,**self.config_updates ,)
UpperCAmelCase_ : List[str] = prepare_blenderbot_inputs_dict(_snake_case ,_snake_case ,_snake_case )
return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFBlenderbotModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotForConditionalGeneration,
            "feature-extraction": TFBlenderbotModel,
            "summarization": TFBlenderbotForConditionalGeneration,
            "text2text-generation": TFBlenderbotForConditionalGeneration,
            "translation": TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbot400MIntegrationTests(unittest.TestCase):
    src_text = ["My friends are cool but they eat too many carbs."]
    model_name = "facebook/blenderbot-400M-distill"

    @cached_property
    def tokenizer(self):
        return BlenderbotTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
| 71 | 0 |
from __future__ import annotations


def two_pointer(nums: list[int], target: int) -> list[int]:
    """Return the indices of the two entries of the ascending-sorted list ``nums`` that sum to ``target``."""
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{two_pointer([2, 7, 11, 15], 9) = }") | 287 |
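The two-pointer scan above relies on ``nums`` being sorted ascending: the pair is found in O(n) by moving inward from both ends. A quick sanity check of that behaviour (a minimal sketch, not part of the original file):

assert two_pointer([2, 7, 11, 15], 9) == [0, 1]  # sorted input: pair found in one pass
assert two_pointer([1, 2, 3], 7) == []           # no pair sums to the target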
'''simple docstring'''
from numpy import exp, pi, sqrt


def gaussian(x: float, mu: float = 0.0, sigma: float = 1.0) -> float:
    """Return the value of the Gaussian probability density function at ``x``."""
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 71 | 0 |
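As a worked check of the density formula above (a sketch, not part of the original file): at the mean of a standard normal the density peaks at 1/sqrt(2*pi).

print(gaussian(0))        # ≈ 0.3989422804014327, i.e. 1 / sqrt(2 * pi)
print(gaussian(2, mu=2))  # same peak value, distribution shifted to mu = 2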
'''simple docstring'''
def get_highest_set_bit_position(number: int) -> int:
    """Return the position of the highest set bit, i.e. ``number.bit_length()``."""
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    position = 0
    while number:
        position += 1
        number >>= 1
    return position
if __name__ == "__main__":
import doctest
doctest.testmod()
| 577 |
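A quick illustration of the loop above (a sketch, not part of the original file): one right-shift per iteration means the count equals the built-in bit length.

assert get_highest_set_bit_position(32) == 6  # 0b100000 needs six shifts to reach zero
assert get_highest_set_bit_position(32) == (32).bit_length()
assert get_highest_set_bit_position(0) == 0   # the loop never runs for zero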
'''simple docstring'''
from typing import Optional

from torch import nn

from .transformer_2d import Transformer2DModel, Transformer2DModelOutput


class DualTransformer2DModel(nn.Module):
    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        num_vector_embeds: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
    ):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(
                    num_attention_heads=num_attention_heads,
                    attention_head_dim=attention_head_dim,
                    in_channels=in_channels,
                    num_layers=num_layers,
                    dropout=dropout,
                    norm_num_groups=norm_num_groups,
                    cross_attention_dim=cross_attention_dim,
                    attention_bias=attention_bias,
                    sample_size=sample_size,
                    num_vector_embeds=num_vector_embeds,
                    activation_fn=activation_fn,
                    num_embeds_ada_norm=num_embeds_ada_norm,
                )
                for _ in range(2)
            ]
        )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(
        self,
        hidden_states,
        encoder_hidden_states,
        timestep=None,
        attention_mask=None,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states,
                encoder_hidden_states=condition_state,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                return_dict=False,
            )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return Transformer2DModelOutput(sample=output_states)
| 71 | 0 |
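The blending step at the end of `forward` is a plain convex combination of the two branch residuals; a standalone numeric sketch of that arithmetic (hypothetical tensors, not the real model):

import torch

residual_a = torch.full((1, 4), 2.0)  # stand-in for encoded_states[0]
residual_b = torch.zeros((1, 4))      # stand-in for encoded_states[1]
mix_ratio = 0.5
blended = residual_a * mix_ratio + residual_b * (1 - mix_ratio)
print(blended)  # tensor([[1., 1., 1., 1.]]), halfway between the two branches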
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device

if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer


@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 377 |
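The expected-score arithmetic in the test above undoes the mean reduction of the cross-entropy loss; a minimal sketch with hypothetical numbers (not real model output):

label_length = 4        # labels.shape[-1], the number of label tokens
mean_nll = 21.228175    # model(...).loss is the per-token mean negative log-likelihood
mtf_score = -(label_length * mean_nll)
print(mtf_score)        # -84.9127, the summed log-likelihood the test compares against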
'''simple docstring'''
import json
import sys


def format_json_to_md(input_json_file, output_md_file):
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)

    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]

    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]

        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")

        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)

            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"

            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"

        output_md += [title, lines, value, " "]

    output_md.append("</details>")

    with open(output_md_file, "w", encoding="utf-8") as f:
        f.writelines("\n".join(output_md))


if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]

    format_json_to_md(input_json_file, output_md_file)
| 71 | 0 |
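For reference, a hypothetical input and the row it produces (a sketch of the JSON shape the function expects, not taken from the original repo):

example = {"benchmarks/read": {"read_time": {"new": 0.12, "old": 0.15, "diff": -0.03}}}
# Rendering `example` yields a "### Benchmark: read" section followed by
# | metric | read_time |
# |--------|---|
# | new / old (diff) |  0.120000 / 0.150000 (-0.030000) |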
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
| 155 |
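A minimal, hypothetical subclass showing how the two abstract hooks fit together (here `parser` is the subparsers action returned by `ArgumentParser.add_subparsers()`):

class EchoCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser):
        # register an "echo" subcommand and tell the dispatcher how to build it
        echo_parser = parser.add_parser("echo")
        echo_parser.add_argument("text")
        echo_parser.set_defaults(func=lambda args: EchoCommand(args.text))

    def __init__(self, text: str):
        self.text = text

    def run(self):
        print(self.text)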
'''simple docstring'''
import unittest

from transformers import DebertaV2Tokenizer, DebertaV2TokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin

SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")


@require_sentencepiece
@require_tokenizers
class DebertaV2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaV2Tokenizer
    rust_tokenizer_class = DebertaV2TokenizerFast
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, unk_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "[PAD]")
        self.assertEqual(len(vocab_keys), 30001)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30000)
    def test_do_lower_case(self):
        # fmt: off
        sequence = " \tHeLLo!how \n Are yoU? "
        tokens_target = ["▁hello", "!", "how", "▁are", "▁you", "?"]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
    def test_sentencepiece_tokenize_and_convert_tokens_to_string(self):
        pass

    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
    def test_sentencepiece_tokenize_and_decode(self):
        pass
    def test_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_split_by_punct_false(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_false_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_false_split_by_punct_false(self):
        # fmt: off
        sequence = " \tHeLLo!how \n Are yoU? "
        tokens_target = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_rust_and_python_full_tokenizers(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        sequence = "This is a test"
        ids_target = [13, 1, 4398, 25, 21, 1289]
        tokens_target = ["▁", "T", "his", "▁is", "▁a", "▁test"]
        back_tokens_target = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, keep_accents=True)
        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, keep_accents=True)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)

        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        ids_target = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
        tokens_target = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
        back_tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)
    def test_sequence_builders(self):
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id], encoded_sentence)
        self.assertEqual(
            [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [tokenizer.sep_token_id],
            encoded_pair,
        )
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
UpperCAmelCase_ : Union[str, Any] = {"input_ids": [[1, 3_98_67, 36, 1_93_90, 4_86, 27, 3_50_52, 8_14_36, 18, 6_06_85, 12_25, 7, 3_50_52, 8_14_36, 18, 93_67, 1_68_99, 18, 1_59_37, 53, 5_94, 7_73, 18, 1_62_87, 3_04_65, 36, 1_59_37, 6, 4_11_39, 38, 3_69_79, 6_07_63, 1_91, 6, 3_41_32, 99, 6, 5_05_38, 3_90, 4_32_30, 6, 3_41_32, 27_79, 2_08_50, 14, 6_99, 10_72, 11_94, 36, 3_82, 1_09_01, 53, 7, 6_99, 10_72, 20_84, 36, 2_04_22, 6_30, 53, 19, 1_05, 30_49, 18_96, 10_53, 1_68_99, 15_06, 11, 3_79_78, 42_43, 7, 12_37, 3_18_69, 2_00, 1_65_66, 6_54, 6, 3_50_52, 8_14_36, 7, 5_56_30, 1_35_93, 4, 2], [1, 26, 1_50_11, 13, 6_67, 8, 10_53, 18, 2_36_11, 12_37, 7_23_56, 1_28_20, 34, 10_41_34, 12_09, 35, 1_33_13, 66_27, 21, 2_02, 3_47, 7, 1_64, 23_99, 11, 46, 44_85, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 12_32, 28_64, 1_57_85, 1_49_51, 1_05, 5, 85_81, 12_50, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        expected_encoding = UpperCAmelCase_  # alias; the long dict literal above keeps its extracted name
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="microsoft/deberta-v2-xlarge",
            revision="ad6e42c1532ddf3a15c39246b63f5559d558b670",
        )
| 71 | 0 |
'''simple docstring'''
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image

from diffusers import (
    DDIMScheduler,
    KandinskyV22ControlnetImg2ImgPipeline,
    KandinskyV22PriorEmb2EmbPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference

enable_full_determinism()


class KandinskyV22ControlnetImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22ControlnetImg2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_controlnet_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22ControlnetImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_controlnet_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        init_image = init_image.resize((512, 512))

        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png"
        )
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)

        prompt = "A robot, 4k photo"

        pipe_prior = KandinskyV22PriorEmb2EmbPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22ControlnetImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)

        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)

        image_emb, zero_image_emb = pipe_prior(
            prompt, image=init_image, strength=0.85, generator=generator, negative_prompt=""
        ).to_tuple()

        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            hint=hint,
            generator=generator,
            num_inference_steps=100,
            height=512,
            width=512,
            strength=0.5,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        assert_mean_pixel_difference(image, expected_image) | 329 |
'''simple docstring'''
def get_highest_set_bit_position(number: int) -> int:
    """Return the position of the highest set bit, i.e. ``number.bit_length()``."""
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    position = 0
    while number:
        position += 1
        number >>= 1
    return position
if __name__ == "__main__":
import doctest
doctest.testmod()
| 71 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


class PoolFormerImageProcessor(BaseImageProcessor):
    r"""Image processor that resizes via a crop percentage, center crops, rescales and normalizes."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: float = 0.9,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        rescale_factor: Union[int, float] = 1 / 255,
        do_rescale: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.crop_pct = crop_pct
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: Optional[float] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size and ("height" not in size or "width" not in size):
            raise ValueError(f"size must contain 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        if crop_pct is not None:
            if "shortest_edge" in size:
                scale_size = int(size["shortest_edge"] / crop_pct)
            elif "height" in size and "width" in size:
                if size["height"] == size["width"]:
                    scale_size = int(size["height"] / crop_pct)
                else:
                    scale_size = (int(size["height"] / crop_pct), int(size["width"] / crop_pct))
            else:
                raise ValueError("Invalid size for resize: {}".format(size))

            output_size = get_resize_output_image_size(image, size=scale_size, default_to_square=False)
        else:
            if "shortest_edge" in size:
                output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            elif "height" in size and "width" in size:
                output_size = (size["height"], size["width"])
            else:
                raise ValueError("Invalid size for resize: {}".format(size))

        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"size must contain 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: int = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_pct is None:
            raise ValueError("Crop_pct must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 637 |
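A minimal usage sketch of the processor above (the dummy image is illustrative): the defaults resize the shortest edge to int(224 / 0.9) = 248, then center crop to 224 x 224.

from PIL import Image
import numpy as np

processor = PoolFormerImageProcessor()
dummy = Image.fromarray(np.zeros((300, 400, 3), dtype=np.uint8))
batch = processor(images=dummy, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 224, 224)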
'''simple docstring'''
from math import factorial
def combinations(n: int, k: int) -> int:
    """Return n choose k, the number of ways to pick ``k`` items out of ``n``."""
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))
if __name__ == "__main__":
print(
"""The number of five-card hands possible from a standard""",
f"""fifty-two card deck is: {combinations(52, 5)}\n""",
)
print(
"""If a class of 40 students must be arranged into groups of""",
f"""4 for group projects, there are {combinations(40, 4)} ways""",
"""to arrange them.\n""",
)
print(
"""If 10 teams are competing in a Formula One race, there""",
f"""are {combinations(10, 3)} ways that first, second and""",
"""third place can be awarded.""",
)
| 71 | 0 |
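A quick cross-check of the factorial formula above (a sketch, not part of the original file): the standard library computes the same quantity directly.

from math import comb

assert combinations(52, 5) == comb(52, 5) == 2598960  # five-card hands from a 52-card deck
assert combinations(40, 4) == comb(40, 4) == 91390    # groups of 4 from 40 students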
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "bert_for_seq_generation": (
            "https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"bert_for_seq_generation": 512}


class BertGenerationTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    prefix_tokens: List[int] = []
    model_input_names = ["input_ids", "attention_mask"]
def __init__(self , A , A="<s>" , A="</s>" , A="<unk>" , A="<pad>" , A="<::::>" , A = None , **A , ):
lowerCamelCase_ : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
# Add extra_ids to the special token list
super().__init__(
bos_token=_snake_case , eos_token=_snake_case , unk_token=_snake_case , pad_token=_snake_case , sep_token=_snake_case , sp_model_kwargs=self.sp_model_kwargs , **_snake_case , )
lowerCamelCase_ : Optional[Any] = vocab_file
lowerCamelCase_ : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_snake_case )
@property
def UpperCAmelCase__ (self ):
return self.sp_model.get_piece_size()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = {self.convert_ids_to_tokens(_snake_case ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__(self ):
lowerCamelCase_ : Tuple = self.__dict__.copy()
lowerCamelCase_ : Dict = None
return state
def __setstate__(self , A ):
lowerCamelCase_ : Dict = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowerCamelCase_ : Union[str, Any] = {}
lowerCamelCase_ : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCAmelCase__ (self , A ):
return self.sp_model.encode(_snake_case , out_type=_snake_case )
def UpperCAmelCase__ (self , A ):
return self.sp_model.piece_to_id(_snake_case )
def UpperCAmelCase__ (self , A ):
lowerCamelCase_ : str = self.sp_model.IdToPiece(_snake_case )
return token
def UpperCAmelCase__ (self , A ):
lowerCamelCase_ : Dict = []
lowerCamelCase_ : int = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(_snake_case ) + token
lowerCamelCase_ : int = []
else:
current_sub_tokens.append(_snake_case )
out_string += self.sp_model.decode(_snake_case )
return out_string.strip()
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 422 |
'''simple docstring'''
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
    UNet2DConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class VideoToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = VideoToVideoSDPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"}) - {"image", "width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"}) - {"image"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    test_attention_slicing = False

    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64, 64, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"),
            up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
            cross_attention_dim=32,
            attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        # 3 frames
        video = floats_tensor((1, 3, 3, 32, 32), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "video": video,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs
    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = VideoToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (32, 32, 3)
        expected_slice = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=5e-3)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()
@slow
@skip_mps
class VideoToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_two_step_model(self):
        pipe = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_XL", torch_dtype=torch.float16)
        pipe.enable_model_cpu_offload()

        # 10 frames
        generator = torch.Generator(device="cpu").manual_seed(0)
        video = torch.randn((1, 10, 3, 1024, 576), generator=generator)
        video = video.to("cuda")

        prompt = "Spiderman is surfing"

        video_frames = pipe(prompt, video=video, generator=generator, num_inference_steps=3, output_type="pt").frames

        expected_array = np.array([-1.0458984, -1.1279297, -0.9663086, -0.91503906, -0.75097656])
        assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array).sum() < 1e-2
| 71 | 0 |
'''simple docstring'''
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
logger = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, quant_trainer_args=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
        self.quant_trainer_args = quant_trainer_args
        self.calib_num = 128  # default number of calibration samples

    def get_calib_dataloader(self, calib_dataset=None):
        if calib_dataset is None and self.calib_dataset is None:
            raise ValueError("Trainer: calibration requires an calib_dataset.")
        calib_dataset = calib_dataset if calib_dataset is not None else self.calib_dataset

        calib_dataset = self._remove_unused_columns(calib_dataset, description="Calibration")
        return DataLoader(
            calib_dataset,
            batch_size=self.args.eval_batch_size,
            collate_fn=self.data_collator,
            drop_last=self.args.dataloader_drop_last,
            num_workers=self.args.dataloader_num_workers,
            pin_memory=self.args.dataloader_pin_memory,
            shuffle=True,
        )
    def calibrate(self, calib_dataset=None):
        calib_dataset = self.train_dataset if calib_dataset is None else calib_dataset
        calib_dataloader = self.get_calib_dataloader(calib_dataset)

        model = self.model
        quant_trainer.configure_model(model, self.quant_trainer_args, calib=True)
        model.eval()
        quant_trainer.enable_calibration(model)

        logger.info("***** Running calibration *****")
        logger.info(f"  Num examples = {self.calib_num}")
        logger.info(f"  Batch size = {calib_dataloader.batch_size}")
        for step, inputs in enumerate(calib_dataloader):
            # Prediction step
            loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only=True)
            if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
                break

        quant_trainer.finish_calibration(model, self.quant_trainer_args)
        self.model = model
    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is not None and self.compute_metrics is not None:
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

            self.log(metrics)
        else:
            metrics = {}

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics
    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
    def save_onnx(self, output_dir="./"):
        eval_dataset = self.eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)

        batch = next(iter(eval_dataloader))

        # saving device - to make it consistent
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        # convert to tuple
        input_tuple = tuple(v.to(device) for k, v in batch.items())

        logger.info("Converting model to be onnx compatible")
        from pytorch_quantization.nn import TensorQuantizer

        TensorQuantizer.use_fb_fake_quant = True

        model = self.model.to(device)
        model.eval()
        model.float()

        model_to_save = model.module if hasattr(model, "module") else model
        quant_trainer.configure_model(model_to_save, self.quant_trainer_args)

        output_model_file = os.path.join(output_dir, "model.onnx")
        logger.info(f"exporting model to {output_model_file}")

        axes = {0: "batch_size", 1: "seq_len"}
        torch.onnx.export(
            model_to_save,
            input_tuple,
            output_model_file,
            export_params=True,
            opset_version=13,
            do_constant_folding=True,
            input_names=["input_ids", "attention_mask", "token_type_ids"],
            output_names=["output_start_logits", "output_end_logits"],
            dynamic_axes={
                "input_ids": axes,
                "attention_mask": axes,
                "token_type_ids": axes,
                "output_start_logits": axes,
                "output_end_logits": axes,
            },
            verbose=True,
        )
        logger.info("onnx export finished")
| 330 |
'''simple docstring'''
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize("path" , ["paws", "csv"] )
def a__ ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : str ) -> Tuple:
"""simple docstring"""
inspect_dataset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[Any] = path + ".py"
assert script_name in os.listdir(_SCREAMING_SNAKE_CASE )
assert "__pycache__" not in os.listdir(_SCREAMING_SNAKE_CASE )
@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning" )
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning" )
@pytest.mark.parametrize("path" , ["accuracy"] )
def a__ ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Tuple ) -> List[str]:
"""simple docstring"""
inspect_metric(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = path + ".py"
assert script_name in os.listdir(_SCREAMING_SNAKE_CASE )
assert "__pycache__" not in os.listdir(_SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
"path, config_name, expected_splits" , [
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "dalle-mini--wit", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
] , )
def a__ ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Dict ) -> str:
"""simple docstring"""
UpperCAmelCase_ : int = get_dataset_config_info(_SCREAMING_SNAKE_CASE , config_name=_SCREAMING_SNAKE_CASE )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception" , [
("paws", None, ValueError),
] , )
def a__ ( _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : List[str] ) -> str:
"""simple docstring"""
with pytest.raises(_SCREAMING_SNAKE_CASE ):
get_dataset_config_info(_SCREAMING_SNAKE_CASE , config_name=_SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
"path, expected" , [
("squad", "plain_text"),
("acronym_identification", "default"),
("lhoestq/squad", "plain_text"),
("lhoestq/test", "default"),
("lhoestq/demo1", "lhoestq--demo1"),
("dalle-mini/wit", "dalle-mini--wit"),
] , )
def a__ ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : int ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = get_dataset_config_names(_SCREAMING_SNAKE_CASE )
assert expected in config_names
@pytest.mark.parametrize(
"path, expected_configs, expected_splits_in_first_config" , [
("squad", ["plain_text"], ["train", "validation"]),
("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
] , )
def a__ ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : List[str] ) -> Any:
"""simple docstring"""
UpperCAmelCase_ : Any = get_dataset_infos(_SCREAMING_SNAKE_CASE )
assert list(infos.keys() ) == expected_configs
UpperCAmelCase_ : Optional[Any] = expected_configs[0]
assert expected_config in infos
UpperCAmelCase_ : Dict = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
"path, expected_config, expected_splits" , [
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "dalle-mini--wit", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
] , )
def a__ ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Dict ) -> Any:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = get_dataset_infos(_SCREAMING_SNAKE_CASE )
assert expected_config in infos
UpperCAmelCase_ : Dict = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception" , [
("paws", None, ValueError),
] , )
def a__ ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : str ) -> Any:
"""simple docstring"""
with pytest.raises(_SCREAMING_SNAKE_CASE ):
get_dataset_split_names(_SCREAMING_SNAKE_CASE , config_name=_SCREAMING_SNAKE_CASE )
| 71 | 0 |
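# --- Added illustration (assumption-free usage sketch): the inspection helpers tested above can
# be called directly to discover configurations and splits without downloading a full dataset.
# The "paws" path and values mirror the parametrized cases in the tests.
from datasets import get_dataset_config_names, get_dataset_split_names

config_names = get_dataset_config_names("paws")  # ["labeled_final", "labeled_swap", "unlabeled_final"]
split_names = get_dataset_split_names("paws", config_name="labeled_final")  # ["train", "test", "validation"]
print(config_names, split_names)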
'''simple docstring'''
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
EN_CODE = 50003
PYTHON_CODE = 50002
@require_sentencepiece
@require_tokenizers
class PLBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PLBartTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="base", keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_full_base_tokenizer(self):
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="base", keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )
_lowerCAmelCase = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
_snake_case , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
            ],
        )

        end = tokenizer.vocab_size
        language_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 4, end)]
        self.assertListEqual(language_tokens, ["__java__", "__python__", "__en_XX__", "<mask>"])

        code = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
        input_ids = tokenizer(code).input_ids
        self.assertEqual(
            tokenizer.decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False),
            code,
        )
    def test_full_multi_tokenizer(self):
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="multi", keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
            ],
        )

        end = tokenizer.vocab_size
        language_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 7, end)]
        self.assertListEqual(
            language_tokens, ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"]
        )

        code = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
        input_ids = tokenizer(code).input_ids
        self.assertEqual(
            tokenizer.decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False),
            code,
        )
@require_torch
@require_sentencepiece
@require_tokenizers
class PLBartPythonEnIntegrationTest(unittest.TestCase):
    checkpoint_name = "uclanlp/plbart-python-en_XX"
    src_text = [
        "def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])",
        "def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])",
    ]
    tgt_text = [
        "Returns the maximum value of a b c.",
        "Sums the values of a b c.",
    ]
    expected_src_tokens = [
        134,
        5452,
        33460,
        33441,
        33463,
        33465,
        33463,
        33449,
        988,
        20,
        33456,
        19,
        33456,
        771,
        39,
        4258,
        889,
        3318,
        33441,
        33463,
        33465,
        33463,
        33449,
        2471,
        2,
        PYTHON_CODE,
    ]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: PLBartTokenizer = PLBartTokenizer.from_pretrained(
            cls.checkpoint_name, language_codes="base", src_lang="python", tgt_lang="en_XX"
        )
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__java__"], 50001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__python__"], 50002)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__en_XX__"], 50003)

    def test_python_en_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_python_en_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(EN_CODE, self.tokenizer.all_special_ids)
        generated_ids = [EN_CODE, 9037, 33442, 57, 752, 153, 14, 56, 18, 9, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_english = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_english)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_python_en_tokenizer_truncation(self):
        src_text = ["def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])" * 20]
        self.assertIsInstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], PYTHON_CODE)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "__java__"]), [50004, 50001])

    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = PLBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        self.assertEqual(batch.input_ids[1][-2:].tolist(), [2, PYTHON_CODE])
        self.assertEqual(batch.decoder_input_ids[1][0], EN_CODE)
        self.assertEqual(batch.decoder_input_ids[1][-1], 2)
        self.assertEqual(batch.labels[1][-2:].tolist(), [2, EN_CODE])

    @require_torch
    def test_python_en_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual((2, 26), batch.input_ids.shape)
        self.assertEqual((2, 26), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, PYTHON_CODE])

    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="java"
        )
        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[150, 242, 2, 50003]],
                "attention_mask": [[1, 1, 1, 1]],
                # java
                "forced_bos_token_id": 50001,
            },
        )
| 5 |
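# --- Added illustration: a sketch of what shift_tokens_right does in the tests above for
# mBART/PLBart-style models; this is a re-implementation for exposition, not the library function.
import torch


def shift_tokens_right_sketch(input_ids, pad_token_id):
    """Rotate each row right by one, moving the last non-pad token (the language code) to index 0."""
    prev_output_tokens = input_ids.clone()
    # index of the last non-pad token in every row
    index_of_eos = (prev_output_tokens.ne(pad_token_id).sum(dim=1) - 1).unsqueeze(-1)
    decoder_start_tokens = prev_output_tokens.gather(1, index_of_eos).squeeze(-1)
    prev_output_tokens[:, 1:] = prev_output_tokens[:, :-1].clone()
    prev_output_tokens[:, 0] = decoder_start_tokens
    return prev_output_tokens


labels = torch.tensor([[10, 11, 2, 50003]])  # tokens, EOS (2), then the language code
print(shift_tokens_right_sketch(labels, pad_token_id=1))  # tensor([[50003, 10, 11, 2]])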
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class CLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 71 | 0 |
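# --- Added illustration (hedged usage sketch): the same processor behaviour exercised above,
# run against a real public checkpoint. "openai/clip-vit-base-patch32" is a standard CLIP
# checkpoint and is not part of the tests themselves.
import numpy as np
from PIL import Image

from transformers import CLIPProcessor

processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt", padding=True)
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']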
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"],
        shuffle=False,
        collate_fn=collate_fn,
        batch_size=EVAL_BATCH_SIZE,
        drop_last=(accelerator.mixed_precision == "fp8"),
    )

    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
| 303 |
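# --- Added illustration: the gradient-accumulation arithmetic used in training_function above,
# isolated as a tiny helper. The split_batch name is ours; only the 16/32 batch-size constants
# come from the script.
MAX_GPU_BATCH_SIZE = 16


def split_batch(batch_size):
    """Return (micro_batch_size, gradient_accumulation_steps) for a requested effective batch."""
    if batch_size > MAX_GPU_BATCH_SIZE:
        return MAX_GPU_BATCH_SIZE, batch_size // MAX_GPU_BATCH_SIZE
    return batch_size, 1


print(split_batch(64))  # (16, 4): the loss is divided by 4 and the optimizer steps every 4 batches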
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
    ClapTextConfig,
    ClapTextModelWithProjection,
    RobertaTokenizer,
    SpeechT5HifiGan,
    SpeechT5HifiGanConfig,
)

from diffusers import (
    AudioLDMPipeline,
    AutoencoderKL,
    DDIMScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class AudioLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = AudioLDMPipeline
    params = TEXT_TO_AUDIO_PARAMS
    batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "num_waveforms_per_prompt",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=(32, 64),
            class_embed_type="simple_projection",
            projection_class_embeddings_input_dim=32,
            class_embeddings_concat=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=1,
            out_channels=1,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = ClapTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            projection_dim=32,
        )
        text_encoder = ClapTextModelWithProjection(text_encoder_config)
        tokenizer = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta", model_max_length=77)
        vocoder_config = SpeechT5HifiGanConfig(
            model_in_dim=8,
            sampling_rate=16000,
            upsample_initial_channel=16,
            upsample_rates=[2, 2],
            upsample_kernel_sizes=[4, 4],
            resblock_kernel_sizes=[3, 7],
            resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]],
            normalize_before=False,
        )
        vocoder = SpeechT5HifiGan(vocoder_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "vocoder": vocoder,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
        }
        return inputs
    def test_audioldm_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(**inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033]
        )

        assert np.abs(audio_slice - expected_slice).max() < 1e-2
    def test_audioldm_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = audioldm_pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=audioldm_pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        text_inputs = text_inputs["input_ids"].to(torch_device)

        prompt_embeds = audioldm_pipe.text_encoder(
            text_inputs,
        )
        prompt_embeds = prompt_embeds.text_embeds
        # additional L_2 normalization over each hidden-state
        prompt_embeds = F.normalize(prompt_embeds, dim=-1)

        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2
    def test_audioldm_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = audioldm_pipe.tokenizer(
                p,
                padding="max_length",
                max_length=audioldm_pipe.tokenizer.model_max_length,
                truncation=True,
                return_tensors="pt",
            )
            text_inputs = text_inputs["input_ids"].to(torch_device)

            text_embeds = audioldm_pipe.text_encoder(
                text_inputs,
            )
            text_embeds = text_embeds.text_embeds
            # additional L_2 normalization over each hidden-state
            text_embeds = F.normalize(text_embeds, dim=-1)

            embeds.append(text_embeds)

        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2
    def test_audioldm_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "egg cracking"
        output = audioldm_pipe(**inputs, negative_prompt=negative_prompt)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032]
        )

        assert np.abs(audio_slice - expected_slice).max() < 1e-2
    def test_audioldm_num_waveforms_per_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = "A hammer hitting a wooden surface"

        # test num_waveforms_per_prompt=1 (default)
        audios = audioldm_pipe(prompt, num_inference_steps=2).audios

        assert audios.shape == (1, 256)

        # test num_waveforms_per_prompt=1 (default) for batch of prompts
        batch_size = 2
        audios = audioldm_pipe([prompt] * batch_size, num_inference_steps=2).audios

        assert audios.shape == (batch_size, 256)

        # test num_waveforms_per_prompt for single prompt
        num_waveforms_per_prompt = 2
        audios = audioldm_pipe(prompt, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt).audios

        assert audios.shape == (num_waveforms_per_prompt, 256)

        # test num_waveforms_per_prompt for batch of prompts
        batch_size = 2
        audios = audioldm_pipe(
            [prompt] * batch_size, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt
        ).audios

        assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
    def test_audioldm_audio_length_in_s(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        vocoder_sampling_rate = audioldm_pipe.vocoder.config.sampling_rate

        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(audio_length_in_s=0.016, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.016

        output = audioldm_pipe(audio_length_in_s=0.032, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.032
    def test_audioldm_vocoder_model_in_dim(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = ["hey"]

        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        assert audio_shape == (1, 256)

        config = audioldm_pipe.vocoder.config
        config.model_in_dim *= 2
        audioldm_pipe.vocoder = SpeechT5HifiGan(config).to(torch_device)
        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
        assert audio_shape == (1, 256)
    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(test_mean_pixel_difference=False)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False)
@slow
class AudioLDMPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 8, 128, 16))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 2.5,
        }
        return inputs
    def test_audioldm(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        inputs["num_inference_steps"] = 25
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[77230:77240]
        expected_slice = np.array(
            [-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315]
        )
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 1e-2
    def test_audioldm_lms(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe.scheduler = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[27780:27790]
        expected_slice = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212])
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 3e-2
| 71 | 0 |
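# --- Added illustration (hedged usage sketch, mirroring the slow tests above): running the
# released "cvssp/audioldm" checkpoint end to end. scipy is assumed only for writing the .wav;
# the 16000 Hz rate matches the vocoder sampling rate used throughout this file.
import torch

from diffusers import AudioLDMPipeline

pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")

audio = pipe("A hammer hitting a wooden surface", num_inference_steps=25, audio_length_in_s=5.0).audios[0]

import scipy.io.wavfile

scipy.io.wavfile.write("hammer.wav", rate=16000, data=audio)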
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class DifferentiableProjectiveCamera:
    """
    Implements a batch, differentiable, standard pinhole camera
    """

    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]

    def __post_init__(self):
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2

    def resolution(self):
        return torch.from_numpy(np.array([self.width, self.height], dtype=np.float32))

    def fov(self):
        return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.float32))

    def get_image_coords(self) -> torch.Tensor:
        # enumerate pixels row-major, then split into (column, row) pairs
        pixel_indices = torch.arange(self.height * self.width)
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices, self.width, rounding_mode="trunc"),
            ],
            axis=1,
        )
        return coords

    @property
    def camera_rays(self):
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape))

        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0), [batch_size * inner_batch_size, *coords.shape])
        rays = self.get_camera_rays(coords)

        rays = rays.view(batch_size, inner_batch_size * self.height * self.width, 2, 3)

        return rays

    def get_camera_rays(self, coords: torch.Tensor) -> torch.Tensor:
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]

        flat = coords.view(batch_size, -1, 2)

        res = self.resolution()
        fov = self.fov()

        # map pixel coordinates to fractions in [-1, 1], then scale by tan(fov / 2)
        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2)

        fracs = fracs.view(batch_size, -1, 2)
        directions = (
            self.z.view(batch_size, 1, 3)
            + self.x.view(batch_size, 1, 3) * fracs[:, :, :1]
            + self.y.view(batch_size, 1, 3) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1, keepdim=True)
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size, 1, 3), [batch_size, directions.shape[1], 3]),
                directions,
            ],
            dim=2,
        )
        return rays.view(batch_size, *shape, 2, 3)

    def resize_image(self, width: int, height: int) -> "DifferentiableProjectiveCamera":
        """
        Creates a new camera for the resized view assuming the aspect ratio does not change.
        """
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin,
            x=self.x,
            y=self.y,
            z=self.z,
            width=width,
            height=height,
            x_fov=self.x_fov,
            y_fov=self.y_fov,
        )


def create_pan_cameras(size: int) -> DifferentiableProjectiveCamera:
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0, 2 * np.pi, num=20):
        z = np.array([np.sin(theta), np.cos(theta), -0.5])
        z /= np.sqrt(np.sum(z**2))
        origin = -z * 4
        x = np.array([np.cos(theta), -np.sin(theta), 0.0])
        y = np.cross(z, x)
        origins.append(origin)
        xs.append(x)
        ys.append(y)
        zs.append(z)
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins, axis=0)).float(),
        x=torch.from_numpy(np.stack(xs, axis=0)).float(),
        y=torch.from_numpy(np.stack(ys, axis=0)).float(),
        z=torch.from_numpy(np.stack(zs, axis=0)).float(),
        width=size,
        height=size,
        x_fov=0.7,
        y_fov=0.7,
        shape=(1, len(xs)),
    )
| 651 |
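# --- Added illustration: the pixel-to-ray mapping inside get_camera_rays, restated for a single
# pixel in plain NumPy. The pixel_to_direction function below is our own exposition, not part of
# the class above.
import numpy as np


def pixel_to_direction(px, py, width, height, x_fov, y_fov, x, y, z):
    fx = (px / (width - 1)) * 2 - 1  # horizontal fraction in [-1, 1]
    fy = (py / (height - 1)) * 2 - 1  # vertical fraction in [-1, 1]
    d = z + x * (fx * np.tan(x_fov / 2)) + y * (fy * np.tan(y_fov / 2))
    return d / np.linalg.norm(d)


# The centre pixel of a 64x64 view looks straight down the camera's z axis:
d = pixel_to_direction(
    31.5, 31.5, 64, 64, 0.7, 0.7, x=np.array([1.0, 0, 0]), y=np.array([0, 1.0, 0]), z=np.array([0, 0, 1.0])
)
print(d)  # approximately [0, 0, 1]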
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {
"""configuration_gpt_neox_japanese""": ["""GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoXJapaneseConfig"""],
"""tokenization_gpt_neox_japanese""": ["""GPTNeoXJapaneseTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
"""GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoXJapaneseForCausalLM""",
"""GPTNeoXJapaneseLayer""",
"""GPTNeoXJapaneseModel""",
"""GPTNeoXJapanesePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 71 | 0 |
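# --- Added illustration: a stripped-down version of the lazy-import idea, to show why the module
# above registers _import_structure instead of importing eagerly. This LazyModule is a sketch,
# not transformers' actual _LazyModule implementation.
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so the real import happens only once per name
        return value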
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import Seq2SeqTrainer
from seq2seq_training_args import Seq2SeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
    AutoModelForSeq2SeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
    Seq2SeqDataCollator,
    Seq2SeqDataset,
    assert_all_frozen,
    build_compute_metrics_fn,
    check_output_dir,
    freeze_embeds,
    freeze_params,
    lmap,
    save_json,
    use_task_specific_params,
    write_txt_file,
)
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether to freeze the encoder."})
    freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."})
@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    task: Optional[str] = field(
        default="summarization",
        metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"},
    )
    max_source_length: Optional[int] = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    max_target_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    val_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        },
    )
    test_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."})
    n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."})
    n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."})
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."})
    tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."})
    eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."})
    ignore_pad_token_for_loss: bool = field(
        default=True,
        metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."},
    )
def handle_metrics(split, metrics, output_dir):
    logger.info(f"***** {split} metrics *****")
    for key in sorted(metrics.keys()):
        logger.info(f"  {key} = {metrics[key]}")
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))
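# --- Added illustration: the file handle_metrics writes, assuming save_json is a thin json.dump
# wrapper (as in this example's utils module). The metric values shown are made up.
import json
import tempfile

example_metrics = {"val_loss": 0.1234, "val_n_objs": 500}
example_dir = tempfile.mkdtemp()
with open(os.path.join(example_dir, "val_results.json"), "w") as f:
    json.dump(example_metrics, f, indent=4)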
def main():
    # See all possible arguments in src/transformers/training_args.py or by passing the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))

    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    check_output_dir(training_args)
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank, training_args.device, training_args.n_gpu,
        bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED), training_args.fp16,
    )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("""Training/evaluation parameters %s""" , _SCREAMING_SNAKE_CASE )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
    for p in extra_model_params:
        if getattr(training_args, p, None):
            assert hasattr(config, p), f"({config.__class__.__name__}) doesn't have a `{p}` attribute"
            setattr(config, p, getattr(training_args, p))

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_args.model_name_or_path,
        from_tf=".ckpt" in model_args.model_name_or_path,
        config=config,
        cache_dir=model_args.cache_dir,
    )
# use task specific params
    use_task_specific_params(model, data_args.task)
# set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams

    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer, MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang)
if model_args.freeze_embeds:
freeze_embeds(_SCREAMING_SNAKE_CASE )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
_lowercase = SeqaSeqDataset
# Get datasets
_lowercase = (
dataset_class(
_SCREAMING_SNAKE_CASE , type_path="""train""" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
if training_args.do_train
else None
)
_lowercase = (
dataset_class(
_SCREAMING_SNAKE_CASE , type_path="""val""" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
_lowercase = (
dataset_class(
_SCREAMING_SNAKE_CASE , type_path="""test""" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
if training_args.do_predict
else None
)
# Initialize our Trainer
_lowercase = (
build_compute_metrics_fn(data_args.task , _SCREAMING_SNAKE_CASE ) if training_args.predict_with_generate else None
)
_lowercase = SeqaSeqTrainer(
model=_SCREAMING_SNAKE_CASE , args=_SCREAMING_SNAKE_CASE , data_args=_SCREAMING_SNAKE_CASE , train_dataset=_SCREAMING_SNAKE_CASE , eval_dataset=_SCREAMING_SNAKE_CASE , data_collator=SeqaSeqDataCollator(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , )
_lowercase = {}
# Training
if training_args.do_train:
logger.info("""*** Train ***""" )
_lowercase = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
_lowercase = train_result.metrics
_lowercase = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics("""train""" , _SCREAMING_SNAKE_CASE , training_args.output_dir )
all_metrics.update(_SCREAMING_SNAKE_CASE )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , """trainer_state.json""" ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
_lowercase = trainer.evaluate(metric_key_prefix="""val""" )
_lowercase = data_args.n_val
_lowercase = round(metrics["""val_loss"""] , 4 )
if trainer.is_world_process_zero():
handle_metrics("""val""" , _SCREAMING_SNAKE_CASE , training_args.output_dir )
all_metrics.update(_SCREAMING_SNAKE_CASE )
if training_args.do_predict:
logger.info("""*** Predict ***""" )
_lowercase = trainer.predict(test_dataset=_SCREAMING_SNAKE_CASE , metric_key_prefix="""test""" )
_lowercase = test_output.metrics
_lowercase = data_args.n_test
if trainer.is_world_process_zero():
_lowercase = round(metrics["""test_loss"""] , 4 )
handle_metrics("""test""" , _SCREAMING_SNAKE_CASE , training_args.output_dir )
all_metrics.update(_SCREAMING_SNAKE_CASE )
if training_args.predict_with_generate:
_lowercase = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=_SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=_SCREAMING_SNAKE_CASE )
_lowercase = lmap(str.strip , _SCREAMING_SNAKE_CASE )
write_txt_file(_SCREAMING_SNAKE_CASE , os.path.join(training_args.output_dir , """test_generations.txt""" ) )
if trainer.is_world_process_zero():
save_json(_SCREAMING_SNAKE_CASE , os.path.join(training_args.output_dir , """all_results.json""" ) )
return all_metrics
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ : str ) -> Optional[int]:
main()
if __name__ == "__main__":
main() | 287 |
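# A hedged sketch of the JSON entry point above: launched with exactly one
# argument ending in ".json", HfArgumentParser fills all three dataclasses
# from that file. The field names below are attributes the script actually
# reads; the values (checkpoint, paths) and the script name are illustrative.
#
#   {
#     "model_name_or_path": "sshleifer/student_marian_en_ro_6_1",
#     "data_dir": "wmt_en_ro",
#     "task": "translation",
#     "output_dir": "marian_output",
#     "do_train": true,
#     "do_eval": true,
#     "predict_with_generate": true
#   }
#
#   python finetune_trainer.py config.json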
'''simple docstring'''
import heapq
def greedy_min_vertex_cover(graph: dict) -> set[int]:
    """simple docstring"""
    queue: list[list] = []

    # for each node and its adjacency list, add them and the rank of the node to the queue
    # using the heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices: set[int] = set()

    # while the queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract the vertex with max rank from the queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v has no adjacent node, skip it
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem,
            # remove argmax from elem's adjacency list and update its rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCamelCase = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
| 71 | 0 |
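# Behaviour note for greedy_min_vertex_cover above: it always covers every
# edge, but being greedy it need not return a *minimum* cover. A tiny
# hand-checked case on a 3-node path graph (degrees 1, 2, 1), where picking
# the middle vertex covers both edges:
#
#   >>> greedy_min_vertex_cover({0: [1], 1: [0, 2], 2: [1]})
#   {1}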
'''simple docstring'''
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer(tf.keras.layers.Layer):
"""simple docstring"""
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ = 3 , lowerCAmelCase_ = 1 , lowerCAmelCase_ = 1 , lowerCAmelCase_ = "relu" , **lowerCAmelCase_ , ):
'''simple docstring'''
super().__init__(**_snake_case )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
a_ : Any = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
a_ : Dict = tf.keras.layers.ConvaD(
filters=_snake_case , kernel_size=_snake_case , strides=_snake_case , padding="""VALID""" , groups=_snake_case , use_bias=_snake_case , name="""convolution""" , )
a_ : Dict = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="""normalization""" )
a_ : Dict = ACTaFN[activation] if activation is not None else tf.identity
def _lowerCAmelCase ( self , lowerCAmelCase_ ):
'''simple docstring'''
a_ : Union[str, Any] = self.convolution(self.padding(_snake_case ) )
a_ : Union[str, Any] = self.normalization(_snake_case )
a_ : str = self.activation(_snake_case )
return hidden_state
class TFRegNetEmbeddings(tf.keras.layers.Layer):
"""simple docstring"""
def __init__( self , lowerCAmelCase_ , **lowerCAmelCase_ ):
'''simple docstring'''
super().__init__(**_snake_case )
a_ : Tuple = config.num_channels
a_ : Dict = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name="""embedder""" , )
def _lowerCAmelCase ( self , lowerCAmelCase_ ):
'''simple docstring'''
a_ : Optional[Any] = shape_list(_snake_case )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
"""Make sure that the channel dimension of the pixel values match with the one set in the configuration.""" )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
a_ : Tuple = tf.transpose(_snake_case , perm=(0, 2, 3, 1) )
a_ : Optional[Any] = self.embedder(_snake_case )
return hidden_state
class TFRegNetShortCut(tf.keras.layers.Layer):
"""simple docstring"""
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ = 2 , **lowerCAmelCase_ ):
'''simple docstring'''
super().__init__(**_snake_case )
a_ : Optional[Any] = tf.keras.layers.ConvaD(
filters=_snake_case , kernel_size=1 , strides=_snake_case , use_bias=_snake_case , name="""convolution""" )
a_ : Tuple = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="""normalization""" )
def _lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = False ):
'''simple docstring'''
return self.normalization(self.convolution(_snake_case ) , training=_snake_case )
class TFRegNetSELayer(tf.keras.layers.Layer):
"""simple docstring"""
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ ):
'''simple docstring'''
super().__init__(**_snake_case )
a_ : Optional[int] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_snake_case , name="""pooler""" )
a_ : Tuple = [
tf.keras.layers.ConvaD(filters=_snake_case , kernel_size=1 , activation="""relu""" , name="""attention.0""" ),
tf.keras.layers.ConvaD(filters=_snake_case , kernel_size=1 , activation="""sigmoid""" , name="""attention.2""" ),
]
def _lowerCAmelCase ( self , lowerCAmelCase_ ):
'''simple docstring'''
a_ : Optional[Any] = self.pooler(_snake_case )
for layer_module in self.attention:
a_ : Dict = layer_module(_snake_case )
a_ : Any = hidden_state * pooled
return hidden_state
class TFRegNetXLayer(tf.keras.layers.Layer):
"""simple docstring"""
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 1 , **lowerCAmelCase_ ):
'''simple docstring'''
super().__init__(**_snake_case )
a_ : str = in_channels != out_channels or stride != 1
a_ : Optional[int] = max(1 , out_channels // config.groups_width )
a_ : Tuple = (
TFRegNetShortCut(_snake_case , stride=_snake_case , name="""shortcut""" )
if should_apply_shortcut
else tf.keras.layers.Activation("""linear""" , name="""shortcut""" )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
a_ : List[str] = [
TFRegNetConvLayer(_snake_case , kernel_size=1 , activation=config.hidden_act , name="""layer.0""" ),
TFRegNetConvLayer(
_snake_case , stride=_snake_case , groups=_snake_case , activation=config.hidden_act , name="""layer.1""" ),
TFRegNetConvLayer(_snake_case , kernel_size=1 , activation=_snake_case , name="""layer.2""" ),
]
a_ : List[Any] = ACTaFN[config.hidden_act]
def _lowerCAmelCase ( self , lowerCAmelCase_ ):
'''simple docstring'''
a_ : Optional[Any] = hidden_state
for layer_module in self.layers:
a_ : str = layer_module(_snake_case )
a_ : List[str] = self.shortcut(_snake_case )
hidden_state += residual
a_ : Any = self.activation(_snake_case )
return hidden_state
class TFRegNetYLayer(tf.keras.layers.Layer):
"""simple docstring"""
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 1 , **lowerCAmelCase_ ):
'''simple docstring'''
super().__init__(**_snake_case )
a_ : int = in_channels != out_channels or stride != 1
a_ : Optional[Any] = max(1 , out_channels // config.groups_width )
a_ : Optional[Any] = (
TFRegNetShortCut(_snake_case , stride=_snake_case , name="""shortcut""" )
if should_apply_shortcut
else tf.keras.layers.Activation("""linear""" , name="""shortcut""" )
)
a_ : Optional[Any] = [
TFRegNetConvLayer(_snake_case , kernel_size=1 , activation=config.hidden_act , name="""layer.0""" ),
TFRegNetConvLayer(
_snake_case , stride=_snake_case , groups=_snake_case , activation=config.hidden_act , name="""layer.1""" ),
TFRegNetSELayer(_snake_case , reduced_channels=int(round(in_channels / 4 ) ) , name="""layer.2""" ),
TFRegNetConvLayer(_snake_case , kernel_size=1 , activation=_snake_case , name="""layer.3""" ),
]
a_ : List[str] = ACTaFN[config.hidden_act]
def _lowerCAmelCase ( self , lowerCAmelCase_ ):
'''simple docstring'''
a_ : List[Any] = hidden_state
for layer_module in self.layers:
a_ : Union[str, Any] = layer_module(_snake_case )
a_ : Tuple = self.shortcut(_snake_case )
hidden_state += residual
a_ : Dict = self.activation(_snake_case )
return hidden_state
class TFRegNetStage(tf.keras.layers.Layer):
"""simple docstring"""
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 2 , lowerCAmelCase_ = 2 , **lowerCAmelCase_ ):
'''simple docstring'''
super().__init__(**_snake_case )
a_ : Dict = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
a_ : Dict = [
# downsampling is done in the first layer with stride of 2
layer(_snake_case , _snake_case , _snake_case , stride=_snake_case , name="""layers.0""" ),
*[layer(_snake_case , _snake_case , _snake_case , name=f'''layers.{i+1}''' ) for i in range(depth - 1 )],
]
def _lowerCAmelCase ( self , lowerCAmelCase_ ):
'''simple docstring'''
for layer_module in self.layers:
a_ : int = layer_module(_snake_case )
return hidden_state
class TFRegNetEncoder(tf.keras.layers.Layer):
"""simple docstring"""
def __init__( self , lowerCAmelCase_ , **lowerCAmelCase_ ):
'''simple docstring'''
super().__init__(**_snake_case )
a_ : Optional[int] = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
_snake_case , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="""stages.0""" , ) )
a_ : Union[str, Any] = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(_snake_case , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(_snake_case , _snake_case , _snake_case , depth=_snake_case , name=f'''stages.{i+1}''' ) )
def _lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = False , lowerCAmelCase_ = True ):
'''simple docstring'''
a_ : Union[str, Any] = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
a_ : List[Any] = hidden_states + (hidden_state,)
a_ : int = stage_module(_snake_case )
if output_hidden_states:
a_ : int = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=_snake_case , hidden_states=_snake_case )
@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    """simple docstring"""

    config_class = RegNetConfig
def __init__( self , lowerCAmelCase_ , **lowerCAmelCase_ ):
'''simple docstring'''
super().__init__(**_snake_case )
a_ : Any = config
a_ : int = TFRegNetEmbeddings(_snake_case , name="""embedder""" )
a_ : List[str] = TFRegNetEncoder(_snake_case , name="""encoder""" )
a_ : Optional[Any] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_snake_case , name="""pooler""" )
@unpack_inputs
def _lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = False , ):
'''simple docstring'''
a_ : List[str] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
a_ : Dict = return_dict if return_dict is not None else self.config.use_return_dict
a_ : Optional[int] = self.embedder(_snake_case , training=_snake_case )
a_ : List[Any] = self.encoder(
_snake_case , output_hidden_states=_snake_case , return_dict=_snake_case , training=_snake_case )
a_ : List[str] = encoder_outputs[0]
a_ : Optional[int] = self.pooler(_snake_case )
# Change to NCHW output format have uniformity in the modules
a_ : Dict = tf.transpose(_snake_case , perm=(0, 3, 1, 2) )
a_ : List[str] = tf.transpose(_snake_case , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
a_ : int = tuple([tf.transpose(_snake_case , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=_snake_case , pooler_output=_snake_case , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class TFRegNetPreTrainedModel(TFPreTrainedModel):
    """simple docstring"""

    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
@property
def _lowerCAmelCase ( self ):
'''simple docstring'''
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_24, 2_24) , dtype=tf.floataa )}
__snake_case: Tuple = R"\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n"
__snake_case: Union[str, Any] = R"\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
"The bare RegNet model outputting raw features without any specific head on top." ,__SCREAMING_SNAKE_CASE ,)
class TFRegNetModel(TFRegNetPreTrainedModel):
"""simple docstring"""
def __init__( self , lowerCAmelCase_ , *lowerCAmelCase_ , **lowerCAmelCase_ ):
'''simple docstring'''
super().__init__(_snake_case , *_snake_case , **_snake_case )
a_ : Optional[int] = TFRegNetMainLayer(_snake_case , name="""regnet""" )
@unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
def _lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_=False , ):
'''simple docstring'''
a_ : Union[str, Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
a_ : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
a_ : Optional[Any] = self.regnet(
pixel_values=_snake_case , output_hidden_states=_snake_case , return_dict=_snake_case , training=_snake_case , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
"""simple docstring"""
def __init__( self , lowerCAmelCase_ , *lowerCAmelCase_ , **lowerCAmelCase_ ):
'''simple docstring'''
super().__init__(_snake_case , *_snake_case , **_snake_case )
a_ : Dict = config.num_labels
a_ : List[str] = TFRegNetMainLayer(_snake_case , name="""regnet""" )
# classification head
a_ : Dict = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name="""classifier.1""" ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=TFSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
def _lowerCAmelCase ( self , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_=False , ):
'''simple docstring'''
a_ : Optional[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
a_ : Optional[int] = return_dict if return_dict is not None else self.config.use_return_dict
a_ : Any = self.regnet(
_snake_case , output_hidden_states=_snake_case , return_dict=_snake_case , training=_snake_case )
a_ : List[str] = outputs.pooler_output if return_dict else outputs[1]
a_ : Union[str, Any] = self.classifier[0](_snake_case )
a_ : List[str] = self.classifier[1](_snake_case )
a_ : str = None if labels is None else self.hf_compute_loss(labels=_snake_case , logits=_snake_case )
if not return_dict:
a_ : Tuple = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=_snake_case , logits=_snake_case , hidden_states=outputs.hidden_states )
| 577 |
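# A minimal usage sketch for the modeling code above, going through the public
# transformers API (AutoImageProcessor is the class the inputs docstring
# points at). The checkpoint name comes from the docstring constants; running
# this needs network access.
import numpy as np


def regnet_usage_sketch():
    from transformers import AutoImageProcessor, TFRegNetModel

    processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
    model = TFRegNetModel.from_pretrained("facebook/regnet-y-040")

    image = np.zeros((224, 224, 3), dtype=np.uint8)  # dummy RGB image
    inputs = processor(images=image, return_tensors="tf")  # NCHW pixel_values
    outputs = model(**inputs)
    return outputs.last_hidden_state.shape  # (1, 1088, 7, 7), per _EXPECTED_OUTPUT_SHAPE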
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
_lowerCamelCase = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    """simple docstring"""
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")
class VivitImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
def __init__( self ,_snake_case = True ,_snake_case = None ,_snake_case = PILImageResampling.BILINEAR ,_snake_case = True ,_snake_case = None ,_snake_case = True ,_snake_case = 1 / 2_55 ,_snake_case = True ,_snake_case = True ,_snake_case = None ,_snake_case = None ,**_snake_case ,):
super().__init__(**_snake_case )
UpperCAmelCase_ : Optional[Any] = size if size is not None else {"shortest_edge": 2_56}
UpperCAmelCase_ : List[str] = get_size_dict(_snake_case ,default_to_square=_snake_case )
UpperCAmelCase_ : str = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
UpperCAmelCase_ : Optional[Any] = get_size_dict(_snake_case ,param_name="crop_size" )
UpperCAmelCase_ : int = do_resize
UpperCAmelCase_ : List[str] = size
UpperCAmelCase_ : Dict = do_center_crop
UpperCAmelCase_ : Optional[Any] = crop_size
UpperCAmelCase_ : Optional[Any] = resample
UpperCAmelCase_ : int = do_rescale
UpperCAmelCase_ : Optional[int] = rescale_factor
UpperCAmelCase_ : Dict = offset
UpperCAmelCase_ : Optional[Any] = do_normalize
UpperCAmelCase_ : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase_ : Dict = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case = PILImageResampling.BILINEAR ,_snake_case = None ,**_snake_case ,):
UpperCAmelCase_ : Any = get_size_dict(_snake_case ,default_to_square=_snake_case )
if "shortest_edge" in size:
UpperCAmelCase_ : Optional[Any] = get_resize_output_image_size(_snake_case ,size["shortest_edge"] ,default_to_square=_snake_case )
elif "height" in size and "width" in size:
UpperCAmelCase_ : Optional[Any] = (size["height"], size["width"])
else:
raise ValueError(f'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
return resize(_snake_case ,size=_snake_case ,resample=_snake_case ,data_format=_snake_case ,**_snake_case )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case = None ,**_snake_case ,):
UpperCAmelCase_ : Dict = get_size_dict(_snake_case )
if "height" not in size or "width" not in size:
raise ValueError(f'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' )
return center_crop(_snake_case ,size=(size["height"], size["width"]) ,data_format=_snake_case ,**_snake_case )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case = True ,_snake_case = None ,**_snake_case ,):
UpperCAmelCase_ : int = image.astype(np.floataa )
if offset:
UpperCAmelCase_ : Any = image - (scale / 2)
return rescale(_snake_case ,scale=_snake_case ,data_format=_snake_case ,**_snake_case )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ,_snake_case = None ,**_snake_case ,):
return normalize(_snake_case ,mean=_snake_case ,std=_snake_case ,data_format=_snake_case ,**_snake_case )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = ChannelDimension.FIRST ,):
        if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
if offset and not do_rescale:
raise ValueError("For offset, do_rescale must also be set to True." )
# All transformations expect numpy arrays.
UpperCAmelCase_ : Optional[int] = to_numpy_array(_snake_case )
if do_resize:
UpperCAmelCase_ : Dict = self.resize(image=_snake_case ,size=_snake_case ,resample=_snake_case )
if do_center_crop:
UpperCAmelCase_ : Optional[Any] = self.center_crop(_snake_case ,size=_snake_case )
if do_rescale:
UpperCAmelCase_ : Union[str, Any] = self.rescale(image=_snake_case ,scale=_snake_case ,offset=_snake_case )
if do_normalize:
UpperCAmelCase_ : Any = self.normalize(image=_snake_case ,mean=_snake_case ,std=_snake_case )
UpperCAmelCase_ : Any = to_channel_dimension_format(_snake_case ,_snake_case )
return image
def UpperCamelCase__ ( self ,_snake_case ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = ChannelDimension.FIRST ,**_snake_case ,):
UpperCAmelCase_ : Tuple = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ : str = resample if resample is not None else self.resample
UpperCAmelCase_ : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase_ : Tuple = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ : List[Any] = offset if offset is not None else self.offset
UpperCAmelCase_ : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase_ : int = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase_ : int = image_std if image_std is not None else self.image_std
UpperCAmelCase_ : Dict = size if size is not None else self.size
UpperCAmelCase_ : int = get_size_dict(_snake_case ,default_to_square=_snake_case )
UpperCAmelCase_ : List[Any] = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase_ : int = get_size_dict(_snake_case ,param_name="crop_size" )
if not valid_images(_snake_case ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
UpperCAmelCase_ : Any = make_batched(_snake_case )
UpperCAmelCase_ : Dict = [
[
self._preprocess_image(
image=_snake_case ,do_resize=_snake_case ,size=_snake_case ,resample=_snake_case ,do_center_crop=_snake_case ,crop_size=_snake_case ,do_rescale=_snake_case ,rescale_factor=_snake_case ,offset=_snake_case ,do_normalize=_snake_case ,image_mean=_snake_case ,image_std=_snake_case ,data_format=_snake_case ,)
for img in video
]
for video in videos
]
UpperCAmelCase_ : List[str] = {"pixel_values": videos}
return BatchFeature(data=_snake_case ,tensor_type=_snake_case )
| 71 | 0 |
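# A small sketch of the input normalisation done by make_batched above: a
# single frame, one video (a list of frames), or a batch of videos all come
# out as a list of videos, each video being a list of frames.
import numpy as np


def make_batched_sketch():
    frame = np.zeros((16, 16, 3), dtype=np.uint8)
    assert make_batched(frame)[0][0] is frame               # single image  -> [[frame]]
    assert make_batched([frame, frame])[0][1] is frame      # one video     -> [video]
    assert make_batched([[frame], [frame]])[1][0] is frame  # batch of videos is returned as-is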
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )

    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return

    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f"{class_data_dir}/caption.txt", "w") as f1, open(f"{class_data_dir}/urls.txt", "w") as f2, open(
        f"{class_data_dir}/images.txt", "w"
    ) as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content))
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    f1.write(images["caption"] + "\n")
                    f2.write(images["url"] + "\n")
                    f3.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return


def parse_args():
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()
if __name__ == "__main__":
    args = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 377 |
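# Example invocation of the retrieval script above, using only the flags it
# defines; the prompt and paths are illustrative:
#
#   python retrieve.py \
#       --class_prompt "photo of a dog" \
#       --class_data_dir real_reg/samples_dog \
#       --num_class_images 200
#
# Downloads land in <class_data_dir>/images, with caption.txt, urls.txt and
# images.txt written alongside them (see the three open() calls above).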
'''simple docstring'''
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths,
        split=None,
        features=None,
        cache_dir=None,
        keep_in_memory=False,
        streaming=False,
        field=None,
        num_proc=None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            field=field,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset


class JsonDatasetWriter:
    def __init__(
        self,
        dataset,
        path_or_buf,
        batch_size=None,
        num_proc=None,
        **to_json_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = "utf-8"
        self.to_json_kwargs = to_json_kwargs

    def write(self):
        _ = self.to_json_kwargs.pop("path_or_buf", None)
        orient = self.to_json_kwargs.pop("orient", "records")
        lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False)
        index = self.to_json_kwargs.pop("index", False if orient in ["split", "table"] else True)
        compression = self.to_json_kwargs.pop("compression", None)

        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"`datasets` currently does not support {compression} compression")

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, "wb", compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f"The compression parameter is not supported when writing to a buffer, but compression={compression}"
                    " was passed. Please provide a local path instead."
                )
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs
            )
        return written

    def _batch_json(self, args):
        offset, orient, lines, index, to_json_kwargs = args

        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs
        )
        if not json_str.endswith("\n"):
            json_str += "\n"
        return json_str.encode(self.encoding)

    def _write(
        self,
        file_obj,
        orient,
        lines,
        index,
        **to_json_kwargs,
    ):
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating json from Arrow format",
            ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json,
                        [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating json from Arrow format",
                ):
                    written += file_obj.write(json_str)

        return written
| 71 | 0 |
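# A hedged round-trip sketch for the reader/writer pair above, through the
# public datasets API (Dataset.from_json / Dataset.to_json delegate to these
# classes); the file name is illustrative.
from datasets import Dataset


def json_roundtrip_sketch(tmp_path="out.jsonl"):
    ds = Dataset.from_dict({"a": [1, 2, 3]})
    ds.to_json(tmp_path, lines=True)    # orient="records": one JSON object per line
    return Dataset.from_json(tmp_path)  # rebuilt via the Json builder above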
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class StableDiffusionPanoramaPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    pipeline_class = StableDiffusionPanoramaPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
def snake_case_ ( self):
torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=1,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler()
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__=0):
__SCREAMING_SNAKE_CASE = torch.manual_seed(_snake_case)
__SCREAMING_SNAKE_CASE = {
"prompt": "a photo of the dolomites",
"generator": generator,
# Setting height and width to None to prevent OOMs on CPU.
"height": None,
"width": None,
"num_inference_steps": 1,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = "cpu" # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE = self.get_dummy_components()
__SCREAMING_SNAKE_CASE = StableDiffusionPanoramaPipeline(**_snake_case)
__SCREAMING_SNAKE_CASE = sd_pipe.to(_snake_case)
sd_pipe.set_progress_bar_config(disable=_snake_case)
__SCREAMING_SNAKE_CASE = self.get_dummy_inputs(_snake_case)
__SCREAMING_SNAKE_CASE = sd_pipe(**_snake_case).images
__SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
__SCREAMING_SNAKE_CASE = np.array([0.61_86, 0.53_74, 0.49_15, 0.41_35, 0.41_14, 0.45_63, 0.51_28, 0.49_77, 0.47_57])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def snake_case_ ( self):
super().test_inference_batch_consistent(batch_sizes=[1, 2])
def snake_case_ ( self):
super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.25E-3)
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = "cpu" # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE = self.get_dummy_components()
__SCREAMING_SNAKE_CASE = StableDiffusionPanoramaPipeline(**_snake_case)
__SCREAMING_SNAKE_CASE = sd_pipe.to(_snake_case)
sd_pipe.set_progress_bar_config(disable=_snake_case)
__SCREAMING_SNAKE_CASE = self.get_dummy_inputs(_snake_case)
__SCREAMING_SNAKE_CASE = "french fries"
__SCREAMING_SNAKE_CASE = sd_pipe(**_snake_case , negative_prompt=_snake_case)
__SCREAMING_SNAKE_CASE = output.images
__SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
__SCREAMING_SNAKE_CASE = np.array([0.61_87, 0.53_75, 0.49_15, 0.41_36, 0.41_14, 0.45_63, 0.51_28, 0.49_76, 0.47_57])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = "cpu" # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE = self.get_dummy_components()
__SCREAMING_SNAKE_CASE = StableDiffusionPanoramaPipeline(**_snake_case)
__SCREAMING_SNAKE_CASE = sd_pipe.to(_snake_case)
sd_pipe.set_progress_bar_config(disable=_snake_case)
__SCREAMING_SNAKE_CASE = self.get_dummy_inputs(_snake_case)
__SCREAMING_SNAKE_CASE = sd_pipe(**_snake_case , view_batch_size=2)
__SCREAMING_SNAKE_CASE = output.images
__SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
__SCREAMING_SNAKE_CASE = np.array([0.61_87, 0.53_75, 0.49_15, 0.41_36, 0.41_14, 0.45_63, 0.51_28, 0.49_76, 0.47_57])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = "cpu" # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE = self.get_dummy_components()
__SCREAMING_SNAKE_CASE = EulerAncestralDiscreteScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""")
__SCREAMING_SNAKE_CASE = StableDiffusionPanoramaPipeline(**_snake_case)
__SCREAMING_SNAKE_CASE = sd_pipe.to(_snake_case)
sd_pipe.set_progress_bar_config(disable=_snake_case)
__SCREAMING_SNAKE_CASE = self.get_dummy_inputs(_snake_case)
__SCREAMING_SNAKE_CASE = sd_pipe(**_snake_case).images
__SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
__SCREAMING_SNAKE_CASE = np.array([0.40_24, 0.65_10, 0.49_01, 0.53_78, 0.58_13, 0.56_22, 0.47_95, 0.44_67, 0.49_52])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = "cpu" # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE = self.get_dummy_components()
__SCREAMING_SNAKE_CASE = PNDMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , skip_prk_steps=_snake_case)
__SCREAMING_SNAKE_CASE = StableDiffusionPanoramaPipeline(**_snake_case)
__SCREAMING_SNAKE_CASE = sd_pipe.to(_snake_case)
sd_pipe.set_progress_bar_config(disable=_snake_case)
__SCREAMING_SNAKE_CASE = self.get_dummy_inputs(_snake_case)
__SCREAMING_SNAKE_CASE = sd_pipe(**_snake_case).images
__SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
__SCREAMING_SNAKE_CASE = np.array([0.63_91, 0.62_91, 0.48_61, 0.51_34, 0.55_52, 0.45_78, 0.50_32, 0.50_23, 0.45_39])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
@slow
@require_torch_gpu
class StableDiffusionPanoramaSlowTests(unittest.TestCase):
"""simple docstring"""
def snake_case_ ( self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case_ ( self , lowerCAmelCase__=0):
__SCREAMING_SNAKE_CASE = torch.manual_seed(_snake_case)
__SCREAMING_SNAKE_CASE = {
"prompt": "a photo of the dolomites",
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = "stabilityai/stable-diffusion-2-base"
__SCREAMING_SNAKE_CASE = DDIMScheduler.from_pretrained(_snake_case , subfolder="""scheduler""")
__SCREAMING_SNAKE_CASE = StableDiffusionPanoramaPipeline.from_pretrained(_snake_case , scheduler=_snake_case , safety_checker=_snake_case)
pipe.to(_snake_case)
pipe.set_progress_bar_config(disable=_snake_case)
pipe.enable_attention_slicing()
__SCREAMING_SNAKE_CASE = self.get_inputs()
__SCREAMING_SNAKE_CASE = pipe(**_snake_case).images
__SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 2_0_4_8, 3)
__SCREAMING_SNAKE_CASE = np.array(
[
0.36_96_83_92,
0.27_02_53_72,
0.32_44_67_66,
0.28_37_93_87,
0.36_36_32_74,
0.30_73_33_47,
0.27_10_00_27,
0.27_05_41_25,
0.25_53_60_96,
])
assert np.abs(expected_slice - image_slice).max() < 1E-2
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = StableDiffusionPanoramaPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2-base""" , safety_checker=_snake_case)
__SCREAMING_SNAKE_CASE = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.to(_snake_case)
pipe.set_progress_bar_config(disable=_snake_case)
pipe.enable_attention_slicing()
__SCREAMING_SNAKE_CASE = self.get_inputs()
__SCREAMING_SNAKE_CASE = pipe(**_snake_case).images
__SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 2_0_4_8, 3)
__SCREAMING_SNAKE_CASE = np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
])
assert np.abs(expected_slice - image_slice).max() < 1E-3
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = 0
def callback_fn(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__) -> None:
__SCREAMING_SNAKE_CASE = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
__SCREAMING_SNAKE_CASE = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 6_4, 2_5_6)
__SCREAMING_SNAKE_CASE = latents[0, -3:, -3:, -1]
__SCREAMING_SNAKE_CASE = np.array(
[
0.18_68_18_69,
0.33_90_78_16,
0.5_36_12_76,
0.14_43_28_65,
-0.02_85_66_11,
-0.73_94_11_23,
0.23_39_79_87,
0.47_32_26_82,
-0.37_82_31_64,
])
assert np.abs(latents_slice.flatten() - expected_slice).max() < 5E-2
elif step == 2:
__SCREAMING_SNAKE_CASE = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 6_4, 2_5_6)
__SCREAMING_SNAKE_CASE = latents[0, -3:, -3:, -1]
__SCREAMING_SNAKE_CASE = np.array(
[
0.18_53_96_45,
0.33_98_72_48,
0.5_37_85_59,
0.14_43_71_42,
-0.02_45_52_61,
-0.7_33_83_17,
0.23_99_07_55,
0.47_35_62_72,
-0.3_78_65_05,
])
assert np.abs(latents_slice.flatten() - expected_slice).max() < 5E-2
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = "stabilityai/stable-diffusion-2-base"
__SCREAMING_SNAKE_CASE = DDIMScheduler.from_pretrained(_snake_case , subfolder="""scheduler""")
__SCREAMING_SNAKE_CASE = StableDiffusionPanoramaPipeline.from_pretrained(_snake_case , scheduler=_snake_case , safety_checker=_snake_case)
__SCREAMING_SNAKE_CASE = pipe.to(_snake_case)
pipe.set_progress_bar_config(disable=_snake_case)
pipe.enable_attention_slicing()
__SCREAMING_SNAKE_CASE = self.get_inputs()
pipe(**_snake_case , callback=_snake_case , callback_steps=1)
assert callback_fn.has_been_called
assert number_of_steps == 3
def snake_case_ ( self):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__SCREAMING_SNAKE_CASE = "stabilityai/stable-diffusion-2-base"
__SCREAMING_SNAKE_CASE = DDIMScheduler.from_pretrained(_snake_case , subfolder="""scheduler""")
__SCREAMING_SNAKE_CASE = StableDiffusionPanoramaPipeline.from_pretrained(_snake_case , scheduler=_snake_case , safety_checker=_snake_case)
__SCREAMING_SNAKE_CASE = pipe.to(_snake_case)
pipe.set_progress_bar_config(disable=_snake_case)
pipe.enable_attention_slicing(1)
pipe.enable_sequential_cpu_offload()
__SCREAMING_SNAKE_CASE = self.get_inputs()
__SCREAMING_SNAKE_CASE = pipe(**_snake_case)
__SCREAMING_SNAKE_CASE = torch.cuda.max_memory_allocated()
# make sure that less than 5.2 GB is allocated
assert mem_bytes < 5.5 * 1_0**9
| 155 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class ASTFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])


class Speech2TextFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])
| 71 | 0 |
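# What the dummy objects above buy you: imports keep working even when the
# optional "speech" backend (torchaudio) is not installed, and the failure is
# deferred to instantiation time. A hedged sketch of the behaviour:
#
#   from transformers import ASTFeatureExtractor  # always importable
#   ASTFeatureExtractor()  # raises an ImportError here if the speech extra is
#                          # missing, via requires_backends as defined above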
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]
        )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])

    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)

    error_msg = ""

    tensor1 = accelerator.pad_across_processes(tensor)
    if tensor1.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."

    tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensor2.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensor2[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor2[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg) | 329 |
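# Shape arithmetic checked by the __main__ block above, for 2 processes:
#   rank 0 builds a (2, 10) tensor, rank 1 a (3, 10) tensor
#   (dim 0 is process_index + 2, so the maximum is num_processes + 1 = 3).
# pad_across_processes zero-pads every rank to (3, 10); by default it pads at
# the end, and with pad_first=True it pads at the front, which is exactly
# what the torch.equal/torch.all assertions verify.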
'''simple docstring'''
def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> tuple[float, float]:
    """simple docstring"""
    if not len(equation1) == len(equation2) == 3:
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        else:
            raise ValueError("No solution. (Inconsistent system)")
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
| 71 | 0 |
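# Worked example for cramers_rule_2x2 above, with each equation given as
# [a, b, c] meaning a*x + b*y = c:
#
#   x + y = 3  ->  [1, 1, 3]
#   x - y = 1  ->  [1, -1, 1]
#
#   determinant   = 1*(-1) - 1*1 = -2
#   determinant_x = 3*(-1) - 1*1 = -4   ->  x = -4 / -2 = 2.0
#   determinant_y = 1*1 - 1*3    = -2   ->  y = -2 / -2 = 1.0
#
#   cramers_rule_2x2([1, 1, 3], [1, -1, 1]) == (2.0, 1.0)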
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
__a : Union[str, Any] = {
"sample_size": 32,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 2,
"num_class_embeds": 1000,
"block_out_channels": [32, 64],
"attention_head_dim": 8,
"down_block_types": [
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "scale_shift",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
__a : List[Any] = {
"sample_size": 64,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 3,
"num_class_embeds": 1000,
"block_out_channels": [192, 192 * 2, 192 * 3, 192 * 4],
"attention_head_dim": 64,
"down_block_types": [
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"AttnUpBlock2D",
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "scale_shift",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
__a : Union[str, Any] = {
"sample_size": 256,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 2,
"num_class_embeds": None,
"block_out_channels": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
"attention_head_dim": 64,
"down_block_types": [
"ResnetDownsampleBlock2D",
"ResnetDownsampleBlock2D",
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"AttnUpBlock2D",
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
"ResnetUpsampleBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "default",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
__a : Optional[int] = {
"num_train_timesteps": 40,
"sigma_min": 0.0_0_2,
"sigma_max": 8_0.0,
}
__a : Tuple = {
"num_train_timesteps": 201,
"sigma_min": 0.0_0_2,
"sigma_max": 8_0.0,
}
__a : str = {
"num_train_timesteps": 151,
"sigma_min": 0.0_0_2,
"sigma_max": 8_0.0,
}
def str2bool(v):
    """simple docstring"""
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    """simple docstring"""
    new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
    new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
    new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
    new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"]
    if has_skip:
        new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"]
        new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"]
    return new_checkpoint
def _SCREAMING_SNAKE_CASE ( __lowercase : List[Any] , __lowercase : Any , __lowercase : List[str] , __lowercase : Optional[int] , __lowercase : Tuple=None ) -> str:
"""simple docstring"""
__A = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3 , dim=0 )
__A = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3 , dim=0 )
__A = checkpoint[f"{old_prefix}.norm.weight"]
__A = checkpoint[f"{old_prefix}.norm.bias"]
__A = weight_q.squeeze(-1 ).squeeze(-1 )
__A = bias_q.squeeze(-1 ).squeeze(-1 )
__A = weight_k.squeeze(-1 ).squeeze(-1 )
__A = bias_k.squeeze(-1 ).squeeze(-1 )
__A = weight_v.squeeze(-1 ).squeeze(-1 )
__A = bias_v.squeeze(-1 ).squeeze(-1 )
__A = (
checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1 ).squeeze(-1 )
)
__A = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1 ).squeeze(-1 )
return new_checkpoint
def _SCREAMING_SNAKE_CASE ( __lowercase : str , __lowercase : Union[str, Any] ) -> int:
"""simple docstring"""
__A = torch.load(_SCREAMING_SNAKE_CASE , map_location="""cpu""" )
__A = {}
__A = checkpoint["time_embed.0.weight"]
__A = checkpoint["time_embed.0.bias"]
__A = checkpoint["time_embed.2.weight"]
__A = checkpoint["time_embed.2.bias"]
if unet_config["num_class_embeds"] is not None:
__A = checkpoint["label_emb.weight"]
__A = checkpoint["input_blocks.0.0.weight"]
__A = checkpoint["input_blocks.0.0.bias"]
__A = unet_config["down_block_types"]
__A = unet_config["layers_per_block"]
__A = unet_config["attention_head_dim"]
__A = unet_config["block_out_channels"]
__A = 1
__A = channels_list[0]
for i, layer_type in enumerate(_SCREAMING_SNAKE_CASE ):
__A = channels_list[i]
__A = current_channels != prev_channels
if layer_type == "ResnetDownsampleBlock2D":
for j in range(_SCREAMING_SNAKE_CASE ):
__A = f"down_blocks.{i}.resnets.{j}"
__A = f"input_blocks.{current_layer}.0"
__A = True if j == 0 and downsample_block_has_skip else False
__A = convert_resnet(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , has_skip=_SCREAMING_SNAKE_CASE )
current_layer += 1
elif layer_type == "AttnDownBlock2D":
for j in range(_SCREAMING_SNAKE_CASE ):
__A = f"down_blocks.{i}.resnets.{j}"
__A = f"input_blocks.{current_layer}.0"
__A = True if j == 0 and downsample_block_has_skip else False
__A = convert_resnet(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , has_skip=_SCREAMING_SNAKE_CASE )
__A = f"down_blocks.{i}.attentions.{j}"
__A = f"input_blocks.{current_layer}.1"
__A = convert_attention(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
current_layer += 1
if i != len(_SCREAMING_SNAKE_CASE ) - 1:
__A = f"down_blocks.{i}.downsamplers.0"
__A = f"input_blocks.{current_layer}.0"
__A = convert_resnet(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
current_layer += 1
__A = current_channels
# hardcoded the mid-block for now
__A = "mid_block.resnets.0"
__A = "middle_block.0"
__A = convert_resnet(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__A = "mid_block.attentions.0"
__A = "middle_block.1"
__A = convert_attention(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__A = "mid_block.resnets.1"
__A = "middle_block.2"
__A = convert_resnet(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__A = 0
__A = unet_config["up_block_types"]
for i, layer_type in enumerate(_SCREAMING_SNAKE_CASE ):
if layer_type == "ResnetUpsampleBlock2D":
for j in range(layers_per_block + 1 ):
__A = f"up_blocks.{i}.resnets.{j}"
__A = f"output_blocks.{current_layer}.0"
__A = convert_resnet(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , has_skip=_SCREAMING_SNAKE_CASE )
current_layer += 1
if i != len(_SCREAMING_SNAKE_CASE ) - 1:
__A = f"up_blocks.{i}.upsamplers.0"
__A = f"output_blocks.{current_layer-1}.1"
__A = convert_resnet(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif layer_type == "AttnUpBlock2D":
for j in range(layers_per_block + 1 ):
__A = f"up_blocks.{i}.resnets.{j}"
__A = f"output_blocks.{current_layer}.0"
__A = convert_resnet(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , has_skip=_SCREAMING_SNAKE_CASE )
__A = f"up_blocks.{i}.attentions.{j}"
__A = f"output_blocks.{current_layer}.1"
__A = convert_attention(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
current_layer += 1
if i != len(_SCREAMING_SNAKE_CASE ) - 1:
__A = f"up_blocks.{i}.upsamplers.0"
__A = f"output_blocks.{current_layer-1}.2"
__A = convert_resnet(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__A = checkpoint["out.0.weight"]
__A = checkpoint["out.0.bias"]
__A = checkpoint["out.2.weight"]
__A = checkpoint["out.2.bias"]
return new_checkpoint
if __name__ == "__main__":
__a : List[str] = argparse.ArgumentParser()
parser.add_argument("--unet_path", default=None, type=str, required=True, help="Path to the unet.pt to convert.")
parser.add_argument(
"--dump_path", default=None, type=str, required=True, help="Path to output the converted UNet model."
)
parser.add_argument("--class_cond", default=True, type=str, help="Whether the model is class-conditional.")
__a : Dict = parser.parse_args()
__a : Optional[Any] = strabool(args.class_cond)
__a : Tuple = os.path.basename(args.unet_path)
print(f"""Checkpoint: {ckpt_name}""")
# Get U-Net config
if "imagenet64" in ckpt_name:
__a : int = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
__a : List[Any] = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
__a : int = TEST_UNET_CONFIG
else:
raise ValueError(f"""Checkpoint type {ckpt_name} is not currently supported.""")
if not args.class_cond:
__a : Any = None
__a : List[str] = con_pt_to_diffuser(args.unet_path, unet_config)
__a : int = UNet2DModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
__a : List[Any] = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
__a : int = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
__a : List[Any] = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(f"""Checkpoint type {ckpt_name} is not currently supported.""")
__a : Any = CMStochasticIterativeScheduler(**scheduler_config)
__a : List[Any] = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
| 637 |
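# The converter above is essentially a key-renaming pass over the original
# state dict. A minimal sketch of that pattern, with made-up key names (not
# the real consistency-model layout):
import torch

old_checkpoint = {
    "input_blocks.0.0.weight": torch.zeros(3, 3),  # hypothetical keys
    "input_blocks.0.0.bias": torch.zeros(3),
}
prefix_map = {"input_blocks.0.0": "conv_in"}  # old prefix -> new prefix
new_checkpoint = {}
for old_key, tensor in old_checkpoint.items():
    for old_prefix, new_prefix in prefix_map.items():
        if old_key.startswith(old_prefix):
            new_checkpoint[old_key.replace(old_prefix, new_prefix, 1)] = tensor
print(sorted(new_checkpoint))  # ['conv_in.bias', 'conv_in.weight']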
'''simple docstring'''
from statistics import mean, stdev
def a__ ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int = 3 ) -> list:
"""simple docstring"""
UpperCAmelCase_ : Dict = min(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = max(_SCREAMING_SNAKE_CASE )
# normalize data
return [round((x - x_min) / (x_max - x_min) , _SCREAMING_SNAKE_CASE ) for x in data]
def a__ ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int = 3 ) -> list:
"""simple docstring"""
UpperCAmelCase_ : Tuple = mean(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = stdev(_SCREAMING_SNAKE_CASE )
# standardize data
return [round((x - mu) / (sigma) , _SCREAMING_SNAKE_CASE ) for x in data]
| 71 | 0 |
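# Usage sketch for the two scaling helpers above, applied to a small sample:
from statistics import mean, stdev

data = [2.0, 4.0, 6.0, 8.0]

x_min, x_max = min(data), max(data)
print([round((x - x_min) / (x_max - x_min), 3) for x in data])
# min-max normalization onto [0, 1]: [0.0, 0.333, 0.667, 1.0]

mu, sigma = mean(data), stdev(data)
print([round((x - mu) / sigma, 3) for x in data])
# z-scores (sample standard deviation): [-1.162, -0.387, 0.387, 1.162]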
'''simple docstring'''
import math
def lowercase_ ( _lowercase = 100 ) -> int:
'''simple docstring'''
lowerCamelCase_ : Optional[int] = sum(i * i for i in range(1 , n + 1 ) )
lowerCamelCase_ : Optional[Any] = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) )
return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(f'{solution() = }')
| 422 |
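# Worked check of the sum-square-difference above for n = 10, matching the
# figures given in the Project Euler 6 statement:
n = 10
sum_of_squares = sum(i * i for i in range(1, n + 1))  # 385
square_of_sum = sum(range(1, n + 1)) ** 2             # 55**2 = 3025
assert square_of_sum - sum_of_squares == 2640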
'''simple docstring'''
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_lowerCamelCase = 16
_lowerCamelCase = 32
def a__ ( _SCREAMING_SNAKE_CASE : Accelerator , _SCREAMING_SNAKE_CASE : int = 16 ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained("bert-base-cased" )
UpperCAmelCase_ : Tuple = load_dataset("glue" , "mrpc" )
def tokenize_function(_SCREAMING_SNAKE_CASE : Union[str, Any] ):
# max_length=None => use the model max length (it's actually the default)
UpperCAmelCase_ : Any = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
UpperCAmelCase_ : Union[str, Any] = datasets.map(
_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE , remove_columns=["idx", "sentence1", "sentence2"] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
UpperCAmelCase_ : Tuple = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(_SCREAMING_SNAKE_CASE : List[Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
UpperCAmelCase_ : Optional[int] = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
UpperCAmelCase_ : Optional[int] = 16
elif accelerator.mixed_precision != "no":
UpperCAmelCase_ : int = 8
else:
UpperCAmelCase_ : Optional[Any] = None
return tokenizer.pad(
_SCREAMING_SNAKE_CASE , padding="longest" , max_length=_SCREAMING_SNAKE_CASE , pad_to_multiple_of=_SCREAMING_SNAKE_CASE , return_tensors="pt" , )
# Instantiate dataloaders.
UpperCAmelCase_ : Any = DataLoader(
tokenized_datasets["train"] , shuffle=_SCREAMING_SNAKE_CASE , collate_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = DataLoader(
tokenized_datasets["validation"] , shuffle=_SCREAMING_SNAKE_CASE , collate_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_lowerCamelCase = mocked_dataloaders # noqa: F811
def a__ ( _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : int ) -> int:
"""simple docstring"""
if os.environ.get("TESTING_MOCKED_DATALOADERS" , _SCREAMING_SNAKE_CASE ) == "1":
UpperCAmelCase_ : Tuple = 2
# Initialize accelerator
UpperCAmelCase_ : int = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCAmelCase_ : str = config["lr"]
UpperCAmelCase_ : Union[str, Any] = int(config["num_epochs"] )
UpperCAmelCase_ : Tuple = int(config["seed"] )
UpperCAmelCase_ : Union[str, Any] = int(config["batch_size"] )
UpperCAmelCase_ : List[str] = evaluate.load("glue" , "mrpc" )
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=_SCREAMING_SNAKE_CASE )
def inner_training_loop(_SCREAMING_SNAKE_CASE : List[str] ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(_SCREAMING_SNAKE_CASE )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCAmelCase_ : Dict = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=_SCREAMING_SNAKE_CASE )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
UpperCAmelCase_ : Dict = model.to(accelerator.device )
# Instantiate optimizer
UpperCAmelCase_ : int = AdamW(params=model.parameters() , lr=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ , UpperCAmelCase_ : Dict = get_dataloaders(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Instantiate scheduler
UpperCAmelCase_ : Union[str, Any] = get_linear_schedule_with_warmup(
optimizer=_SCREAMING_SNAKE_CASE , num_warmup_steps=1_00 , num_training_steps=(len(_SCREAMING_SNAKE_CASE ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = accelerator.prepare(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Now we train the model
for epoch in range(_SCREAMING_SNAKE_CASE ):
model.train()
for step, batch in enumerate(_SCREAMING_SNAKE_CASE ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
UpperCAmelCase_ : str = model(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = outputs.loss
accelerator.backward(_SCREAMING_SNAKE_CASE )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(_SCREAMING_SNAKE_CASE ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
UpperCAmelCase_ : Optional[Any] = model(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = outputs.logits.argmax(dim=-1 )
UpperCAmelCase_ , UpperCAmelCase_ : str = accelerator.gather_for_metrics((predictions, batch["labels"]) )
metric.add_batch(
predictions=_SCREAMING_SNAKE_CASE , references=_SCREAMING_SNAKE_CASE , )
UpperCAmelCase_ : str = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , _SCREAMING_SNAKE_CASE )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def a__ ( ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ : Dict = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument(
"--mixed_precision" , type=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." , )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
UpperCAmelCase_ : Tuple = parser.parse_args()
UpperCAmelCase_ : int = {"lr": 2E-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
training_function(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
| 71 | 0 |
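# A rough sketch of what `find_executable_batch_size` does: run the wrapped
# function, and on a CUDA out-of-memory error halve the batch size and retry.
# This is an illustration of the behavior, not the real accelerate code.
import functools

def find_executable_batch_size_sketch(starting_batch_size=128):
    def decorator(fn):
        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            batch_size = starting_batch_size
            while batch_size > 0:
                try:
                    return fn(batch_size, *args, **kwargs)
                except RuntimeError as err:  # CUDA OOM surfaces as RuntimeError
                    if "out of memory" in str(err).lower():
                        batch_size //= 2  # halve and retry
                    else:
                        raise
            raise RuntimeError("No executable batch size found; reached zero.")
        return wrapper
    return decorator

@find_executable_batch_size_sketch(starting_batch_size=64)
def train(batch_size):
    if batch_size > 16:  # pretend only batches of 16 or fewer fit in memory
        raise RuntimeError("CUDA out of memory")
    return batch_size

assert train() == 16  # tries 64 -> 32 -> 16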
'''simple docstring'''
def __lowercase ( __lowercase ) -> int:
'''simple docstring'''
_A = 0
while num > 0:
digit_sum += num % 10
num //= 10
return digit_sum
def __lowercase ( __lowercase = 100 ) -> int:
'''simple docstring'''
_A = 1
_A = 2
for i in range(2 , max_n + 1 ):
_A = pre_numerator
_A = 2 * i // 3 if i % 3 == 0 else 1
_A = cur_numerator
_A = e_cont * pre_numerator + temp
return sum_digits(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 330 |
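# The recurrence above walks the continued fraction of e, [2; 1, 2, 1, 1, 4,
# 1, 1, 6, ...]; the `2 * i // 3 if i % 3 == 0 else 1` line encodes that
# coefficient pattern. Project Euler 65's own example: the 10th convergent is
# 1457/536, so the digit sum of its numerator is 1 + 4 + 5 + 7 = 17.
pre_numerator, cur_numerator = 1, 2
for i in range(2, 10 + 1):
    temp = pre_numerator
    e_cont = 2 * i // 3 if i % 3 == 0 else 1
    pre_numerator = cur_numerator
    cur_numerator = e_cont * pre_numerator + temp
assert cur_numerator == 1457
assert sum(int(d) for d in str(cur_numerator)) == 17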
'''simple docstring'''
from __future__ import annotations
def a__ ( _SCREAMING_SNAKE_CASE : int ) -> list[int]:
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = 2
UpperCAmelCase_ : Optional[int] = []
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.append(_SCREAMING_SNAKE_CASE )
if n > 1:
factors.append(_SCREAMING_SNAKE_CASE )
return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
| 71 | 0 |
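# The trial-division factorizer above returns prime factors in non-decreasing
# order, with multiplicity. A restated sketch plus two quick checks:
def prime_factors(n):
    i, factors = 2, []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)  # whatever remains above sqrt(n) is itself prime
    return factors

assert prime_factors(360) == [2, 2, 2, 3, 3, 5]
assert prime_factors(97) == [97]  # a prime is its own only factor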
'''simple docstring'''
from collections.abc import Generator
from math import sin
def A (__lowerCamelCase :bytes ):
if len(_SCREAMING_SNAKE_CASE ) != 32:
raise ValueError("""Input must be of length 32""" )
_lowerCAmelCase = B""
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
def A (__lowerCamelCase :int ):
if i < 0:
raise ValueError("""Input must be non-negative""" )
_lowerCAmelCase = format(_SCREAMING_SNAKE_CASE , """08x""" )[-8:]
_lowerCAmelCase = B""
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("""utf-8""" )
return little_endian_hex
def A (__lowerCamelCase :bytes ):
_lowerCAmelCase = B""
for char in message:
bit_string += format(_SCREAMING_SNAKE_CASE , """08b""" ).encode("""utf-8""" )
_lowerCAmelCase = format(len(_SCREAMING_SNAKE_CASE ) , """064b""" ).encode("""utf-8""" )
# Pad bit_string to a multiple of 512 chars
bit_string += b"1"
while len(_SCREAMING_SNAKE_CASE ) % 512 != 448:
bit_string += b"0"
bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] )
return bit_string
def A (__lowerCamelCase :bytes ):
if len(_SCREAMING_SNAKE_CASE ) % 512 != 0:
raise ValueError("""Input must have length that's a multiple of 512""" )
for pos in range(0 , len(_SCREAMING_SNAKE_CASE ) , 512 ):
_lowerCAmelCase = bit_string[pos : pos + 512]
_lowerCAmelCase = []
for i in range(0 , 512 , 32 ):
block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) )
yield block_words
def A (__lowerCamelCase :int ):
if i < 0:
raise ValueError("""Input must be non-negative""" )
_lowerCAmelCase = format(_SCREAMING_SNAKE_CASE , """032b""" )
_lowerCAmelCase = ""
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(_SCREAMING_SNAKE_CASE , 2 )
def A (__lowerCamelCase :int , __lowerCamelCase :int ):
return (a + b) % 2**32
def A (__lowerCamelCase :int , __lowerCamelCase :int ):
if i < 0:
raise ValueError("""Input must be non-negative""" )
if shift < 0:
raise ValueError("""Shift must be non-negative""" )
return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def A (__lowerCamelCase :bytes ):
_lowerCAmelCase = preprocess(_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )]
# Starting states
_lowerCAmelCase = 0X67452301
_lowerCAmelCase = 0Xefcdab89
_lowerCAmelCase = 0X98badcfe
_lowerCAmelCase = 0X10325476
    _lowerCAmelCase = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]
# Process bit string in chunks, each with 16 32-char words
for block_words in get_block_words(_SCREAMING_SNAKE_CASE ):
_lowerCAmelCase = aa
_lowerCAmelCase = ba
_lowerCAmelCase = ca
_lowerCAmelCase = da
# Hash current chunk
for i in range(64 ):
if i <= 15:
# f = (b & c) | (not_32(b) & d) # Alternate definition for f
_lowerCAmelCase = d ^ (b & (c ^ d))
_lowerCAmelCase = i
elif i <= 31:
# f = (d & b) | (not_32(d) & c) # Alternate definition for f
_lowerCAmelCase = c ^ (d & (b ^ c))
_lowerCAmelCase = (5 * i + 1) % 16
elif i <= 47:
_lowerCAmelCase = b ^ c ^ d
_lowerCAmelCase = (3 * i + 5) % 16
else:
_lowerCAmelCase = c ^ (b | not_aa(_SCREAMING_SNAKE_CASE ))
_lowerCAmelCase = (7 * i) % 16
_lowerCAmelCase = (f + a + added_consts[i] + block_words[g]) % 2**32
_lowerCAmelCase = d
_lowerCAmelCase = c
_lowerCAmelCase = b
_lowerCAmelCase = sum_aa(_SCREAMING_SNAKE_CASE , left_rotate_aa(_SCREAMING_SNAKE_CASE , shift_amounts[i] ) )
# Add hashed chunk to running total
_lowerCAmelCase = sum_aa(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_lowerCAmelCase = sum_aa(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_lowerCAmelCase = sum_aa(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_lowerCAmelCase = sum_aa(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_lowerCAmelCase = reformat_hex(_SCREAMING_SNAKE_CASE ) + reformat_hex(_SCREAMING_SNAKE_CASE ) + reformat_hex(_SCREAMING_SNAKE_CASE ) + reformat_hex(_SCREAMING_SNAKE_CASE )
return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
| 5 |
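# Any from-scratch MD5 should agree with the standard library on the same
# input; hashlib gives easy reference digests to test against:
import hashlib

assert hashlib.md5(b"").hexdigest() == "d41d8cd98f00b204e9800998ecf8427e"
assert hashlib.md5(b"hello").hexdigest() == "5d41402abc4b2a76b9719d911017c592"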
'''simple docstring'''
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_lowerCamelCase = """\
@misc{wu2016googles,
title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
_lowerCamelCase = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""
_lowerCamelCase = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
'google_bleu': google_bleu score
Examples:
Example 1:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.44
Example 2:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.61
Example 3:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results[\"google_bleu\"], 2))
0.53
Example 4:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results[\"google_bleu\"], 2))
0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class _snake_case (datasets.Metric):
def UpperCamelCase__ ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" ,id="token" ) ,id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" ,id="token" ) ,id="sequence" ) ,id="references" ),
} ) ,)
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case = 1 ,_snake_case = 4 ,):
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=_snake_case ,hypotheses=_snake_case ,min_len=_snake_case ,max_len=_snake_case )
}
| 71 | 0 |
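# The metric above delegates to NLTK; calling gleu_score directly shows the
# expected input shapes (token lists, and a list of reference lists per
# hypothesis):
from nltk.translate import gleu_score

hypotheses = [["the", "cat", "sat", "on", "the", "mat"]]
list_of_references = [[["the", "cat", "sat", "on", "the", "mat"]]]
score = gleu_score.corpus_gleu(
    list_of_references=list_of_references,
    hypotheses=hypotheses,
    min_len=1,
    max_len=4,
)
assert score == 1.0  # an exact match scores 1.0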
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
import sqlalchemy
class A_ ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__(self , lowercase__ , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__ = False , **lowercase__ , ) -> Union[str, Any]:
super().__init__(features=_snake_case , cache_dir=_snake_case , keep_in_memory=_snake_case , **_snake_case )
__UpperCAmelCase = Sql(
cache_dir=_snake_case , features=_snake_case , sql=_snake_case , con=_snake_case , **_snake_case , )
def lowerCAmelCase_ (self ) -> str:
__UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = None
self.builder.download_and_prepare(
download_config=_snake_case , download_mode=_snake_case , verification_mode=_snake_case , base_path=_snake_case , )
# Build dataset for splits
__UpperCAmelCase = self.builder.as_dataset(
split='''train''' , verification_mode=_snake_case , in_memory=self.keep_in_memory )
return dataset
class A_ :
'''simple docstring'''
def __init__(self , lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , lowercase__ = None , **lowercase__ , ) -> Any:
if num_proc is not None and num_proc <= 0:
raise ValueError(F'''num_proc {num_proc} must be an integer > 0.''' )
__UpperCAmelCase = dataset
__UpperCAmelCase = name
__UpperCAmelCase = con
__UpperCAmelCase = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
__UpperCAmelCase = num_proc
__UpperCAmelCase = to_sql_kwargs
def lowerCAmelCase_ (self ) -> Optional[int]:
__UpperCAmelCase = self.to_sql_kwargs.pop('''sql''' , _snake_case )
__UpperCAmelCase = self.to_sql_kwargs.pop('''con''' , _snake_case )
__UpperCAmelCase = self.to_sql_kwargs.pop('''index''' , _snake_case )
__UpperCAmelCase = self._write(index=_snake_case , **self.to_sql_kwargs )
return written
def lowerCAmelCase_ (self , lowercase__ ) -> List[Any]:
__UpperCAmelCase = args
__UpperCAmelCase = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
__UpperCAmelCase = query_table(
table=self.dataset.data , key=slice(_snake_case , offset + self.batch_size ) , indices=self.dataset._indices , )
__UpperCAmelCase = batch.to_pandas()
__UpperCAmelCase = df.to_sql(self.name , self.con , index=_snake_case , **_snake_case )
return num_rows or len(_snake_case )
def lowerCAmelCase_ (self , lowercase__ , **lowercase__ ) -> Any:
__UpperCAmelCase = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ):
written += self._batch_sql((offset, index, to_sql_kwargs) )
else:
__UpperCAmelCase = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for num_rows in logging.tqdm(
pool.imap(
self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , _snake_case , _snake_case )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ):
written += num_rows
return written
| 303 |
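# The writer above batches rows into pandas frames and relies on
# DataFrame.to_sql, creating the table on the first batch and appending on
# later ones. The core of that pattern in isolation:
import sqlite3
import pandas as pd

con = sqlite3.connect(":memory:")
df = pd.DataFrame({"text": ["a", "b"], "label": [0, 1]})

df.to_sql("my_table", con, index=False)                      # first batch creates
df.to_sql("my_table", con, index=False, if_exists="append")  # later batches append

assert pd.read_sql("SELECT COUNT(*) AS n FROM my_table", con)["n"][0] == 4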
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
_lowerCamelCase = logging.getLogger(__name__)
@dataclass
class _snake_case :
__A : str =field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
__A : Optional[str] =field(
default=__SCREAMING_SNAKE_CASE , metadata={"help": "Pretrained config name or path if not the same as model_name"})
__A : Optional[str] =field(
default=__SCREAMING_SNAKE_CASE , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
__A : Optional[str] =field(
default=__SCREAMING_SNAKE_CASE , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
__A : bool =field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "Whether tp freeze the encoder."})
__A : bool =field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "Whether to freeze the embeddings."})
@dataclass
class _snake_case :
__A : str =field(
metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."})
__A : Optional[str] =field(
default="summarization" , metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"} , )
__A : Optional[int] =field(
default=10_24 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
__A : Optional[int] =field(
default=1_28 , metadata={
"help": (
"The maximum total sequence length for target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
__A : Optional[int] =field(
default=1_42 , metadata={
"help": (
"The maximum total sequence length for validation target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded. "
"This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
"during ``evaluate`` and ``predict``."
)
} , )
__A : Optional[int] =field(
default=1_42 , metadata={
"help": (
"The maximum total sequence length for test target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
__A : Optional[int] =field(default=-1 , metadata={"help": "# training examples. -1 means use all."})
__A : Optional[int] =field(default=-1 , metadata={"help": "# validation examples. -1 means use all."})
__A : Optional[int] =field(default=-1 , metadata={"help": "# test examples. -1 means use all."})
__A : Optional[str] =field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "Source language id for translation."})
__A : Optional[str] =field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "Target language id for translation."})
__A : Optional[int] =field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "# num_beams to use for evaluation."})
__A : bool =field(
default=__SCREAMING_SNAKE_CASE , metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."} , )
def a__ ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : str ) -> Union[str, Any]:
"""simple docstring"""
logger.info(F'''***** {split} metrics *****''' )
for key in sorted(metrics.keys() ):
logger.info(F''' {key} = {metrics[key]}''' )
save_json(_SCREAMING_SNAKE_CASE , os.path.join(_SCREAMING_SNAKE_CASE , F'''{split}_results.json''' ) )
def a__ ( ) -> Any:
"""simple docstring"""
UpperCAmelCase_ : List[str] = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[str] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = parser.parse_args_into_dataclasses()
check_output_dir(_SCREAMING_SNAKE_CASE )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("Training/evaluation parameters %s" , _SCREAMING_SNAKE_CASE )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCAmelCase_ : List[str] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
UpperCAmelCase_ : List[Any] = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
for p in extra_model_params:
if getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
assert hasattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ), F'''({config.__class__.__name__}) doesn\'t have a `{p}` attribute'''
setattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
UpperCAmelCase_ : Union[str, Any] = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf=".ckpt" in model_args.model_name_or_path , config=_SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(_SCREAMING_SNAKE_CASE , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
UpperCAmelCase_ : Dict = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(_SCREAMING_SNAKE_CASE , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : Dict = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
UpperCAmelCase_ : List[Any] = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(_SCREAMING_SNAKE_CASE )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
UpperCAmelCase_ : Dict = SeqaSeqDataset
# Get datasets
UpperCAmelCase_ : Tuple = (
dataset_class(
_SCREAMING_SNAKE_CASE , type_path="train" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_train
else None
)
UpperCAmelCase_ : Dict = (
dataset_class(
_SCREAMING_SNAKE_CASE , type_path="val" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
UpperCAmelCase_ : int = (
dataset_class(
_SCREAMING_SNAKE_CASE , type_path="test" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_predict
else None
)
# Initialize our Trainer
UpperCAmelCase_ : Optional[Any] = (
build_compute_metrics_fn(data_args.task , _SCREAMING_SNAKE_CASE ) if training_args.predict_with_generate else None
)
UpperCAmelCase_ : List[str] = SeqaSeqTrainer(
model=_SCREAMING_SNAKE_CASE , args=_SCREAMING_SNAKE_CASE , data_args=_SCREAMING_SNAKE_CASE , train_dataset=_SCREAMING_SNAKE_CASE , eval_dataset=_SCREAMING_SNAKE_CASE , data_collator=SeqaSeqDataCollator(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , )
UpperCAmelCase_ : List[Any] = {}
# Training
if training_args.do_train:
logger.info("*** Train ***" )
UpperCAmelCase_ : Any = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
UpperCAmelCase_ : int = train_result.metrics
UpperCAmelCase_ : Dict = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics("train" , _SCREAMING_SNAKE_CASE , training_args.output_dir )
all_metrics.update(_SCREAMING_SNAKE_CASE )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , "trainer_state.json" ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
UpperCAmelCase_ : Union[str, Any] = trainer.evaluate(metric_key_prefix="val" )
UpperCAmelCase_ : Optional[Any] = data_args.n_val
UpperCAmelCase_ : Union[str, Any] = round(metrics["val_loss"] , 4 )
if trainer.is_world_process_zero():
handle_metrics("val" , _SCREAMING_SNAKE_CASE , training_args.output_dir )
all_metrics.update(_SCREAMING_SNAKE_CASE )
if training_args.do_predict:
logger.info("*** Predict ***" )
UpperCAmelCase_ : List[Any] = trainer.predict(test_dataset=_SCREAMING_SNAKE_CASE , metric_key_prefix="test" )
UpperCAmelCase_ : List[str] = test_output.metrics
UpperCAmelCase_ : int = data_args.n_test
if trainer.is_world_process_zero():
UpperCAmelCase_ : Optional[Any] = round(metrics["test_loss"] , 4 )
handle_metrics("test" , _SCREAMING_SNAKE_CASE , training_args.output_dir )
all_metrics.update(_SCREAMING_SNAKE_CASE )
if training_args.predict_with_generate:
UpperCAmelCase_ : Optional[int] = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=_SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = lmap(str.strip , _SCREAMING_SNAKE_CASE )
write_txt_file(_SCREAMING_SNAKE_CASE , os.path.join(training_args.output_dir , "test_generations.txt" ) )
if trainer.is_world_process_zero():
save_json(_SCREAMING_SNAKE_CASE , os.path.join(training_args.output_dir , "all_results.json" ) )
return all_metrics
def a__ ( _SCREAMING_SNAKE_CASE : str ) -> Optional[int]:
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 71 | 0 |
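# The argument handling above rests on HfArgumentParser mapping dataclass
# fields to CLI flags; a minimal self-contained sketch of that mechanism
# (SketchArguments is a made-up class for illustration):
from dataclasses import dataclass, field
from transformers import HfArgumentParser

@dataclass
class SketchArguments:
    model_name_or_path: str = field(metadata={"help": "Model id or local path"})
    freeze_encoder: bool = field(default=False, metadata={"help": "Freeze the encoder"})

parser = HfArgumentParser(SketchArguments)
(sketch_args,) = parser.parse_args_into_dataclasses(
    args=["--model_name_or_path", "t5-small", "--freeze_encoder"]
)
assert sketch_args.model_name_or_path == "t5-small"
assert sketch_args.freeze_encoder is True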
from typing import Any
def snake_case_ (__A : list , __A : list , __A : dict , __A : dict , __A : dict , ) -> list:
_validation(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , )
# Creates data structures and fill initial step
__lowerCAmelCase : dict = {}
__lowerCAmelCase : dict = {}
for state in states_space:
__lowerCAmelCase : int = observations_space[0]
__lowerCAmelCase : Union[str, Any] = (
initial_probabilities[state] * emission_probabilities[state][observation]
)
__lowerCAmelCase : List[str] = None
# Fills the data structure with the probabilities of
# different transitions and pointers to previous states
for o in range(1 , len(_SCREAMING_SNAKE_CASE ) ):
__lowerCAmelCase : Optional[int] = observations_space[o]
__lowerCAmelCase : Optional[int] = observations_space[o - 1]
for state in states_space:
# Calculates the argmax for probability function
__lowerCAmelCase : Optional[int] = ""
__lowerCAmelCase : List[str] = -1
for k_state in states_space:
__lowerCAmelCase : Any = (
probabilities[(k_state, prior_observation)]
* transition_probabilities[k_state][state]
* emission_probabilities[state][observation]
)
if probability > max_probability:
__lowerCAmelCase : str = probability
__lowerCAmelCase : Dict = k_state
# Update probabilities and pointers dicts
__lowerCAmelCase : Optional[int] = (
probabilities[(arg_max, prior_observation)]
* transition_probabilities[arg_max][state]
* emission_probabilities[state][observation]
)
__lowerCAmelCase : int = arg_max
# The final observation
__lowerCAmelCase : int = observations_space[len(_SCREAMING_SNAKE_CASE ) - 1]
# argmax for given final observation
__lowerCAmelCase : Any = ""
__lowerCAmelCase : int = -1
for k_state in states_space:
__lowerCAmelCase : Any = probabilities[(k_state, final_observation)]
if probability > max_probability:
__lowerCAmelCase : Tuple = probability
__lowerCAmelCase : List[str] = k_state
__lowerCAmelCase : Tuple = arg_max
# Process pointers backwards
__lowerCAmelCase : Optional[Any] = last_state
__lowerCAmelCase : str = []
for o in range(len(_SCREAMING_SNAKE_CASE ) - 1 , -1 , -1 ):
result.append(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = pointers[previous, observations_space[o]]
result.reverse()
return result
def snake_case_ (__A : Any , __A : Any , __A : Any , __A : Any , __A : Any , ) -> None:
_validate_not_empty(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , )
_validate_lists(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_validate_dicts(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def snake_case_ (__A : Any , __A : Any , __A : Any , __A : Any , __A : Any , ) -> None:
if not all(
[
observations_space,
states_space,
initial_probabilities,
transition_probabilities,
emission_probabilities,
] ):
raise ValueError("""There's an empty parameter""" )
def snake_case_ (__A : Any , __A : Any ) -> None:
_validate_list(_SCREAMING_SNAKE_CASE , """observations_space""" )
_validate_list(_SCREAMING_SNAKE_CASE , """states_space""" )
def snake_case_ (__A : Any , __A : str ) -> None:
    if not isinstance(_object , list ):
__lowerCAmelCase : Optional[int] = f'''{var_name} must be a list'''
raise ValueError(_SCREAMING_SNAKE_CASE )
else:
for x in _object:
            if not isinstance(x , str ):
__lowerCAmelCase : int = f'''{var_name} must be a list of strings'''
raise ValueError(_SCREAMING_SNAKE_CASE )
def snake_case_ (__A : Any , __A : Any , __A : Any , ) -> None:
_validate_dict(_SCREAMING_SNAKE_CASE , """initial_probabilities""" , _SCREAMING_SNAKE_CASE )
_validate_nested_dict(_SCREAMING_SNAKE_CASE , """transition_probabilities""" )
_validate_nested_dict(_SCREAMING_SNAKE_CASE , """emission_probabilities""" )
def snake_case_ (__A : Any , __A : str ) -> None:
    _validate_dict(_object , var_name , dict )
for x in _object.values():
        _validate_dict(x , var_name , float , True )
def snake_case_ (__A : Any , __A : str , __A : type , __A : bool = False ) -> None:
    if not isinstance(_object , dict ):
__lowerCAmelCase : Dict = f'''{var_name} must be a dict'''
raise ValueError(_SCREAMING_SNAKE_CASE )
    if not all(isinstance(x , str ) for x in _object ):
__lowerCAmelCase : Tuple = f'''{var_name} all keys must be strings'''
raise ValueError(_SCREAMING_SNAKE_CASE )
    if not all(isinstance(x , value_type ) for x in _object.values() ):
__lowerCAmelCase : Tuple = "nested dictionary " if nested else ""
__lowerCAmelCase : int = f'''{var_name} {nested_text}all values must be {value_type.__name__}'''
raise ValueError(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 651 |
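# Input shapes for the Viterbi routine above, using the classic
# healthy/fever example; these five values match the five parameters of the
# first function in the snippet, and the most likely state path for
# ["normal", "cold", "dizzy"] works out to ["Healthy", "Healthy", "Fever"].
observations = ["normal", "cold", "dizzy"]
states = ["Healthy", "Fever"]
initial_probabilities = {"Healthy": 0.6, "Fever": 0.4}
transition_probabilities = {
    "Healthy": {"Healthy": 0.7, "Fever": 0.3},
    "Fever": {"Healthy": 0.4, "Fever": 0.6},
}
emission_probabilities = {
    "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
    "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
}
# viterbi(observations, states, initial_probabilities,
#         transition_probabilities, emission_probabilities)
# -> ["Healthy", "Healthy", "Fever"]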
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class _snake_case :
__A : Dict =BlenderbotConfig
__A : Union[str, Any] ={}
__A : Any ="gelu"
def __init__( self ,_snake_case ,_snake_case=13 ,_snake_case=7 ,_snake_case=True ,_snake_case=False ,_snake_case=99 ,_snake_case=32 ,_snake_case=2 ,_snake_case=4 ,_snake_case=37 ,_snake_case=0.1 ,_snake_case=0.1 ,_snake_case=20 ,_snake_case=2 ,_snake_case=1 ,_snake_case=0 ,):
UpperCAmelCase_ : List[Any] = parent
UpperCAmelCase_ : str = batch_size
UpperCAmelCase_ : Dict = seq_length
UpperCAmelCase_ : int = is_training
UpperCAmelCase_ : Optional[Any] = use_labels
UpperCAmelCase_ : Any = vocab_size
UpperCAmelCase_ : Optional[int] = hidden_size
UpperCAmelCase_ : Optional[int] = num_hidden_layers
UpperCAmelCase_ : int = num_attention_heads
UpperCAmelCase_ : Tuple = intermediate_size
UpperCAmelCase_ : Any = hidden_dropout_prob
UpperCAmelCase_ : Optional[int] = attention_probs_dropout_prob
UpperCAmelCase_ : List[Any] = max_position_embeddings
UpperCAmelCase_ : str = eos_token_id
UpperCAmelCase_ : List[Any] = pad_token_id
UpperCAmelCase_ : List[Any] = bos_token_id
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Any = ids_tensor([self.batch_size, self.seq_length - 1] ,self.vocab_size )
UpperCAmelCase_ : int = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) ,1 )
UpperCAmelCase_ : Optional[Any] = tf.concat([input_ids, eos_tensor] ,axis=1 )
UpperCAmelCase_ : int = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
UpperCAmelCase_ : Optional[Any] = self.config_cls(
vocab_size=self.vocab_size ,d_model=self.hidden_size ,encoder_layers=self.num_hidden_layers ,decoder_layers=self.num_hidden_layers ,encoder_attention_heads=self.num_attention_heads ,decoder_attention_heads=self.num_attention_heads ,encoder_ffn_dim=self.intermediate_size ,decoder_ffn_dim=self.intermediate_size ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,eos_token_ids=[2] ,bos_token_id=self.bos_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.pad_token_id ,**self.config_updates ,)
UpperCAmelCase_ : List[str] = prepare_blenderbot_inputs_dict(_snake_case ,_snake_case ,_snake_case )
return config, inputs_dict
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ):
UpperCAmelCase_ : Tuple = TFBlenderbotModel(config=_snake_case ).get_decoder()
UpperCAmelCase_ : int = inputs_dict["input_ids"]
UpperCAmelCase_ : Dict = input_ids[:1, :]
UpperCAmelCase_ : Any = inputs_dict["attention_mask"][:1, :]
UpperCAmelCase_ : int = inputs_dict["head_mask"]
UpperCAmelCase_ : Optional[int] = 1
# first forward pass
UpperCAmelCase_ : List[str] = model(_snake_case ,attention_mask=_snake_case ,head_mask=_snake_case ,use_cache=_snake_case )
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
UpperCAmelCase_ : Optional[int] = ids_tensor((self.batch_size, 3) ,config.vocab_size )
UpperCAmelCase_ : Any = tf.cast(ids_tensor((self.batch_size, 3) ,2 ) ,tf.inta )
# append to next input_ids and
UpperCAmelCase_ : Union[str, Any] = tf.concat([input_ids, next_tokens] ,axis=-1 )
UpperCAmelCase_ : Any = tf.concat([attention_mask, next_attn_mask] ,axis=-1 )
UpperCAmelCase_ : Any = model(_snake_case ,attention_mask=_snake_case )[0]
UpperCAmelCase_ : List[Any] = model(_snake_case ,attention_mask=_snake_case ,past_key_values=_snake_case )[0]
self.parent.assertEqual(next_tokens.shape[1] ,output_from_past.shape[1] )
# select random slice
UpperCAmelCase_ : str = int(ids_tensor((1,) ,output_from_past.shape[-1] ) )
UpperCAmelCase_ : List[str] = output_from_no_past[:, -3:, random_slice_idx]
UpperCAmelCase_ : Union[str, Any] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(_snake_case ,_snake_case ,rtol=1E-3 )
def a__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : str=None , _SCREAMING_SNAKE_CASE : Any=None , _SCREAMING_SNAKE_CASE : Any=None , _SCREAMING_SNAKE_CASE : List[str]=None , _SCREAMING_SNAKE_CASE : Dict=None , ) -> Union[str, Any]:
"""simple docstring"""
if attention_mask is None:
UpperCAmelCase_ : Dict = tf.cast(tf.math.not_equal(_SCREAMING_SNAKE_CASE , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
UpperCAmelCase_ : Optional[int] = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
UpperCAmelCase_ : List[Any] = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
UpperCAmelCase_ : Optional[int] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
UpperCAmelCase_ : str = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFBlenderbotModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotForConditionalGeneration,
            "feature-extraction": TFBlenderbotModel,
            "summarization": TFBlenderbotForConditionalGeneration,
            "text2text-generation": TFBlenderbotForConditionalGeneration,
            "translation": TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbot400MIntegrationTests(unittest.TestCase):
    src_text = ["My friends are cool but they eat too many carbs."]
    model_name = "facebook/blenderbot-400M-distill"

    @cached_property
    def tokenizer(self):
        return BlenderbotTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(model_inputs.input_ids)
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
| 71 | 0 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_nezha"] = [
        "NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NezhaForNextSentencePrediction",
        "NezhaForMaskedLM",
        "NezhaForPreTraining",
        "NezhaForMultipleChoice",
        "NezhaForQuestionAnswering",
        "NezhaForSequenceClassification",
        "NezhaForTokenClassification",
        "NezhaModel",
        "NezhaPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nezha import (
            NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
            NezhaModel,
            NezhaPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
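# With this pattern, `import transformers.models.nezha` stays cheap: the modeling
# classes listed in `_import_structure` are only materialized when an attribute such
# as `NezhaModel` is first accessed through the `_LazyModule` proxy installed above.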
'''simple docstring'''
from numpy import exp, pi, sqrt
def gaussian(x: float, mu: float = 0.0, sigma: float = 1.0) -> float:
    """Return the value of the Gaussian probability density function at ``x``."""
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))
if __name__ == "__main__":
import doctest
doctest.testmod()
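    # Worked example (a known closed-form value): the standard normal density peaks
    # at its mean, where gaussian(0) == 1 / sqrt(2 * pi).
    print(gaussian(0))  # ~0.3989422804014327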
| 71 | 0 |
'''simple docstring'''
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
__snake_case: Dict = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
__snake_case: List[Any] = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
__snake_case: int = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 
'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(
        self,
        predictions: List[List[str]],
        references: List[List[List[str]]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
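# A minimal usage sketch (it mirrors the docstring examples above):
#   google_bleu = datasets.load_metric("google_bleu")
#   results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
# results["google_bleu"] is the minimum of the n-gram precision and recall described
# in the description string.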
| 577 |
'''simple docstring'''
from typing import Optional

from torch import nn

from .transformer_2d import Transformer2DModel, Transformer2DModelOutput


class DualTransformer2DModel(nn.Module):
    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        num_vector_embeds: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
    ):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(
                    num_attention_heads=num_attention_heads,
                    attention_head_dim=attention_head_dim,
                    in_channels=in_channels,
                    num_layers=num_layers,
                    dropout=dropout,
                    norm_num_groups=norm_num_groups,
                    cross_attention_dim=cross_attention_dim,
                    attention_bias=attention_bias,
                    sample_size=sample_size,
                    num_vector_embeds=num_vector_embeds,
                    activation_fn=activation_fn,
                    num_embeds_ada_norm=num_embeds_ada_norm,
                )
                for _ in range(2)
            ]
        )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(
        self,
        hidden_states,
        encoder_hidden_states,
        timestep=None,
        attention_mask=None,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states,
                encoder_hidden_states=condition_state,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                return_dict=False,
            )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return Transformer2DModelOutput(sample=output_states)
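# Reading of the blending above (the 77/257 split is just the default set in
# __init__; which embeddings fill those slots is pipeline-dependent):
# `encoder_hidden_states` packs both condition sequences along dim 1, each
# transformer encodes its assigned slice as a residual relative to the input, and
# the residuals are mixed as mix_ratio * r0 + (1 - mix_ratio) * r1 before the
# input states are added back.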
| 71 | 0 |
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class __lowercase ( unittest.TestCase ):
UpperCamelCase = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
UpperCamelCase = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def _lowercase ( self : Tuple , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Tuple , __lowerCamelCase : str ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = TextaTextGenerationPipeline(model=_snake_case , tokenizer=_snake_case )
return generator, ["Something to write", "Something else"]
def _lowercase ( self : int , __lowerCamelCase : str , __lowerCamelCase : Tuple ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = generator("""Something there""" )
self.assertEqual(_snake_case , [{"""generated_text""": ANY(_snake_case )}] )
# These are encoder decoder, they don't just append to incoming string
self.assertFalse(outputs[0]["""generated_text"""].startswith("""Something there""" ) )
UpperCAmelCase = generator(["""This is great !""", """Something else"""] , num_return_sequences=2 , do_sample=_snake_case )
self.assertEqual(
_snake_case , [
[{"""generated_text""": ANY(_snake_case )}, {"""generated_text""": ANY(_snake_case )}],
[{"""generated_text""": ANY(_snake_case )}, {"""generated_text""": ANY(_snake_case )}],
] , )
UpperCAmelCase = generator(
["""This is great !""", """Something else"""] , num_return_sequences=2 , batch_size=2 , do_sample=_snake_case )
self.assertEqual(
_snake_case , [
[{"""generated_text""": ANY(_snake_case )}, {"""generated_text""": ANY(_snake_case )}],
[{"""generated_text""": ANY(_snake_case )}, {"""generated_text""": ANY(_snake_case )}],
] , )
with self.assertRaises(_snake_case ):
generator(4 )
@require_torch
def _lowercase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""pt""" )
# do_sample=False necessary for reproducibility
UpperCAmelCase = generator("""Something there""" , do_sample=_snake_case )
self.assertEqual(_snake_case , [{"""generated_text""": """"""}] )
UpperCAmelCase = 3
UpperCAmelCase = generator(
"""Something there""" , num_return_sequences=_snake_case , num_beams=_snake_case , )
UpperCAmelCase = [
{"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
{"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
{"generated_text": ""},
]
self.assertEqual(_snake_case , _snake_case )
UpperCAmelCase = generator("""This is a test""" , do_sample=_snake_case , num_return_sequences=2 , return_tensors=_snake_case )
self.assertEqual(
_snake_case , [
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
] , )
UpperCAmelCase = generator.model.config.eos_token_id
UpperCAmelCase = "<pad>"
UpperCAmelCase = generator(
["""This is a test""", """This is a second test"""] , do_sample=_snake_case , num_return_sequences=2 , batch_size=2 , return_tensors=_snake_case , )
self.assertEqual(
_snake_case , [
[
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
],
[
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
],
] , )
@require_tf
def _lowercase ( self : Dict ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""tf""" )
# do_sample=False necessary for reproducibility
UpperCAmelCase = generator("""Something there""" , do_sample=_snake_case )
self.assertEqual(_snake_case , [{"""generated_text""": """"""}] )
| 377 |
'''simple docstring'''
import json
import sys
def format_json_to_md(input_json_file, output_md_file):
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)

    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]

    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]

        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")

        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)

            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"

            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"

        output_md += [title, lines, value, " "]

    output_md.append("</details>")

    with open(output_md_file, "w", encoding="utf-8") as f:
        f.writelines("\n".join(output_md))


if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]

format_json_to_md(input_json_file, output_md_file)
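    # Example of the output shape (hypothetical numbers): for a results file
    # {"bench/a.json": {"time": {"new": 1.5, "old": 2.0, "diff": -0.5}}}, the markdown
    # gets one "### Benchmark: a.json" section whose table row reads
    # "| new / old (diff) | 1.500000 / 2.000000 (-0.500000) |".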
| 71 | 0 |
"""simple docstring"""
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
__magic_name__ = get_tests_dir("fixtures/dummy-config.json")
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = 0
def snake_case_ ( self):
self.assertIsNotNone(transformers.models.auto.__spec__)
self.assertIsNotNone(importlib.util.find_spec("""transformers.models.auto"""))
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""bert-base-uncased""")
self.assertIsInstance(_snake_case , _snake_case)
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(_snake_case)
self.assertIsInstance(_snake_case , _snake_case)
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(_snake_case)
self.assertIsInstance(_snake_case , _snake_case)
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = AutoConfig.for_model("""roberta""")
self.assertIsInstance(_snake_case , _snake_case)
def snake_case_ ( self):
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
__SCREAMING_SNAKE_CASE = os.path.join(_snake_case , """fake-roberta""")
os.makedirs(_snake_case , exist_ok=_snake_case)
with open(os.path.join(_snake_case , """config.json""") , """w""") as f:
f.write(json.dumps({}))
__SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(_snake_case)
self.assertEqual(type(_snake_case) , _snake_case)
def snake_case_ ( self):
try:
AutoConfig.register("""custom""" , _snake_case)
# Wrong model type will raise an error
with self.assertRaises(_snake_case):
AutoConfig.register("""model""" , _snake_case)
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_snake_case):
AutoConfig.register("""bert""" , _snake_case)
# Now that the config is registered, it can be used as any other config with the auto-API
__SCREAMING_SNAKE_CASE = CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(_snake_case)
__SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(_snake_case)
self.assertIsInstance(_snake_case , _snake_case)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def snake_case_ ( self):
with self.assertRaisesRegex(
_snake_case , """bert-base is not a local folder and is not a valid model identifier"""):
__SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""bert-base""")
def snake_case_ ( self):
with self.assertRaisesRegex(
_snake_case , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"""):
__SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(_snake_case , revision="""aaaaaa""")
def snake_case_ ( self):
with self.assertRaisesRegex(
_snake_case , """hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.""" , ):
__SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/no-config-test-repo""")
def snake_case_ ( self):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(_snake_case):
__SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""")
# If remote code is disabled, we can't load this config.
with self.assertRaises(_snake_case):
__SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=_snake_case)
__SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=_snake_case)
self.assertEqual(config.__class__.__name__ , """NewModelConfig""")
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(_snake_case)
__SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(_snake_case , trust_remote_code=_snake_case)
self.assertEqual(reloaded_config.__class__.__name__ , """NewModelConfig""")
def snake_case_ ( self):
class SCREAMING_SNAKE_CASE_ ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__lowercase : Optional[int] = "new-model"
try:
AutoConfig.register("""new-model""" , _snake_case)
# If remote code is not set, the default is to use local
__SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""")
self.assertEqual(config.__class__.__name__ , """NewModelConfigLocal""")
# If remote code is disabled, we load the local one.
__SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=_snake_case)
self.assertEqual(config.__class__.__name__ , """NewModelConfigLocal""")
# If remote is enabled, we load from the Hub
__SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=_snake_case)
self.assertEqual(config.__class__.__name__ , """NewModelConfig""")
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
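        # Sketch of the registration round-trip these tests exercise (assumes a
        # CustomConfig subclass with its own model_type, as imported above):
        #   AutoConfig.register("custom", CustomConfig)
        #   CustomConfig().save_pretrained(tmp_dir)
        #   AutoConfig.from_pretrained(tmp_dir)  # -> CustomConfig instance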
| 155 |
'''simple docstring'''
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCamelCase = get_tests_dir("""fixtures/spiece.model""")
@require_sentencepiece
@require_tokenizers
class _snake_case (__SCREAMING_SNAKE_CASE , unittest.TestCase):
__A : Optional[int] =DebertaVaTokenizer
__A : Union[str, Any] =DebertaVaTokenizerFast
__A : str =True
__A : List[str] =True
def UpperCamelCase__ ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase_ : Optional[int] = DebertaVaTokenizer(_snake_case ,unk_token="<unk>" )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase__ ( self ,_snake_case ):
UpperCAmelCase_ : List[Any] = "this is a test"
UpperCAmelCase_ : Optional[Any] = "this is a test"
return input_text, output_text
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[Any] = "<pad>"
UpperCAmelCase_ : str = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_snake_case ) ,_snake_case )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_snake_case ) ,_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : int = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,"<pad>" )
self.assertEqual(vocab_keys[1] ,"<unk>" )
self.assertEqual(vocab_keys[-1] ,"[PAD]" )
self.assertEqual(len(_snake_case ) ,3_00_01 )
def UpperCamelCase__ ( self ):
self.assertEqual(self.get_tokenizer().vocab_size ,3_00_00 )
def UpperCamelCase__ ( self ):
# fmt: off
UpperCAmelCase_ : str = " \tHeLLo!how \n Are yoU? "
UpperCAmelCase_ : Union[str, Any] = ["▁hello", "!", "how", "▁are", "▁you", "?"]
# fmt: on
UpperCAmelCase_ : Tuple = DebertaVaTokenizer(_snake_case ,do_lower_case=_snake_case )
UpperCAmelCase_ : Union[str, Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Tuple = DebertaVaTokenizerFast(_snake_case ,do_lower_case=_snake_case )
UpperCAmelCase_ : Any = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
@unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." )
def UpperCamelCase__ ( self ):
pass
@unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." )
def UpperCamelCase__ ( self ):
pass
def UpperCamelCase__ ( self ):
# fmt: off
UpperCAmelCase_ : Optional[int] = "I was born in 92000, and this is falsé."
UpperCAmelCase_ : List[str] = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
UpperCAmelCase_ : List[Any] = DebertaVaTokenizer(_snake_case ,split_by_punct=_snake_case )
UpperCAmelCase_ : Optional[int] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : int = DebertaVaTokenizerFast(_snake_case ,split_by_punct=_snake_case )
UpperCAmelCase_ : Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ):
# fmt: off
UpperCAmelCase_ : Tuple = "I was born in 92000, and this is falsé."
UpperCAmelCase_ : Dict = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
UpperCAmelCase_ : Optional[Any] = DebertaVaTokenizer(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case )
UpperCAmelCase_ : Any = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : List[Any] = DebertaVaTokenizerFast(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case )
UpperCAmelCase_ : Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ):
# fmt: off
UpperCAmelCase_ : Optional[int] = "I was born in 92000, and this is falsé."
UpperCAmelCase_ : Optional[int] = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
# fmt: on
UpperCAmelCase_ : List[Any] = DebertaVaTokenizer(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case )
UpperCAmelCase_ : str = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Optional[Any] = DebertaVaTokenizerFast(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case )
UpperCAmelCase_ : str = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ):
# fmt: off
UpperCAmelCase_ : Optional[int] = "I was born in 92000, and this is falsé."
UpperCAmelCase_ : Optional[Any] = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
UpperCAmelCase_ : List[str] = DebertaVaTokenizer(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case )
UpperCAmelCase_ : Tuple = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Dict = DebertaVaTokenizerFast(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case )
UpperCAmelCase_ : Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ):
# fmt: off
UpperCAmelCase_ : Tuple = " \tHeLLo!how \n Are yoU? "
UpperCAmelCase_ : List[Any] = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"]
# fmt: on
UpperCAmelCase_ : Any = DebertaVaTokenizer(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case )
UpperCAmelCase_ : List[str] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : int = DebertaVaTokenizerFast(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case )
UpperCAmelCase_ : Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[str] = self.get_tokenizer()
UpperCAmelCase_ : Union[str, Any] = self.get_rust_tokenizer()
UpperCAmelCase_ : Dict = "I was born in 92000, and this is falsé."
UpperCAmelCase_ : Any = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
UpperCAmelCase_ : Optional[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Tuple = tokenizer.encode(_snake_case ,add_special_tokens=_snake_case )
UpperCAmelCase_ : int = rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Optional[Any] = self.get_rust_tokenizer()
UpperCAmelCase_ : Union[str, Any] = tokenizer.encode(_snake_case )
UpperCAmelCase_ : List[Any] = rust_tokenizer.encode(_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Any = "This is a test"
UpperCAmelCase_ : Optional[int] = [13, 1, 43_98, 25, 21, 12_89]
UpperCAmelCase_ : Optional[Any] = ["▁", "T", "his", "▁is", "▁a", "▁test"]
UpperCAmelCase_ : List[str] = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]
UpperCAmelCase_ : str = DebertaVaTokenizer(_snake_case ,keep_accents=_snake_case )
UpperCAmelCase_ : List[Any] = DebertaVaTokenizerFast(_snake_case ,keep_accents=_snake_case )
UpperCAmelCase_ : Optional[int] = tokenizer.encode(_snake_case ,add_special_tokens=_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Any = tokenizer.tokenize(_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Union[str, Any] = tokenizer.convert_ids_to_tokens(_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : List[Any] = rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Dict = rust_tokenizer.tokenize(_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : List[str] = rust_tokenizer.convert_ids_to_tokens(_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
# fmt: off
UpperCAmelCase_ : List[str] = "I was born in 92000, and this is falsé."
UpperCAmelCase_ : Optional[int] = [13, 1, 23, 3_86, 19, 5_61, 30_50, 15, 17, 48, 25, 82_56, 18, 1, 9]
UpperCAmelCase_ : str = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
UpperCAmelCase_ : List[str] = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
# fmt: on
UpperCAmelCase_ : List[str] = tokenizer.encode(_snake_case ,add_special_tokens=_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Dict = tokenizer.tokenize(_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : int = tokenizer.convert_ids_to_tokens(_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Optional[int] = rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Optional[int] = rust_tokenizer.tokenize(_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Any = rust_tokenizer.convert_ids_to_tokens(_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Any = DebertaVaTokenizer(_snake_case )
UpperCAmelCase_ : Optional[int] = tokenizer.encode("sequence builders" )
UpperCAmelCase_ : Dict = tokenizer.encode("multi-sequence build" )
UpperCAmelCase_ : Tuple = tokenizer.build_inputs_with_special_tokens(_snake_case )
UpperCAmelCase_ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(_snake_case ,_snake_case )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] ,_snake_case )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] ,_snake_case ,)
@slow
def UpperCamelCase__ ( self ):
# fmt: off
UpperCAmelCase_ : Union[str, Any] = {"input_ids": [[1, 3_98_67, 36, 1_93_90, 4_86, 27, 3_50_52, 8_14_36, 18, 6_06_85, 12_25, 7, 3_50_52, 8_14_36, 18, 93_67, 1_68_99, 18, 1_59_37, 53, 5_94, 7_73, 18, 1_62_87, 3_04_65, 36, 1_59_37, 6, 4_11_39, 38, 3_69_79, 6_07_63, 1_91, 6, 3_41_32, 99, 6, 5_05_38, 3_90, 4_32_30, 6, 3_41_32, 27_79, 2_08_50, 14, 6_99, 10_72, 11_94, 36, 3_82, 1_09_01, 53, 7, 6_99, 10_72, 20_84, 36, 2_04_22, 6_30, 53, 19, 1_05, 30_49, 18_96, 10_53, 1_68_99, 15_06, 11, 3_79_78, 42_43, 7, 12_37, 3_18_69, 2_00, 1_65_66, 6_54, 6, 3_50_52, 8_14_36, 7, 5_56_30, 1_35_93, 4, 2], [1, 26, 1_50_11, 13, 6_67, 8, 10_53, 18, 2_36_11, 12_37, 7_23_56, 1_28_20, 34, 10_41_34, 12_09, 35, 1_33_13, 66_27, 21, 2_02, 3_47, 7, 1_64, 23_99, 11, 46, 44_85, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 12_32, 28_64, 1_57_85, 1_49_51, 1_05, 5, 85_81, 12_50, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_snake_case ,model_name="microsoft/deberta-v2-xlarge" ,revision="ad6e42c1532ddf3a15c39246b63f5559d558b670" ,)
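        # The build_inputs_with_special_tokens contract asserted above, written out:
        #   single sequence: [CLS] A [SEP]
        #   pair of sequences: [CLS] A [SEP] B [SEP]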
| 71 | 0 |
'''simple docstring'''
import argparse
from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_t5(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path) | 329 |
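    # Example invocation (the script name and all paths are placeholders):
    #   python convert_t5_tf_checkpoint.py \
    #       --tf_checkpoint_path /path/to/t5/model.ckpt \
    #       --config_file /path/to/t5/config.json \
    #       --pytorch_dump_path /path/to/output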
'''simple docstring'''
def get_highest_set_bit_position(number: int) -> int:
    """Return the 1-indexed position of the highest set bit of ``number``."""
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")

    position = 0
    while number:
        position += 1
        number >>= 1

    return position
if __name__ == "__main__":
import doctest
doctest.testmod()
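    # Worked example: 25 == 0b11001, so its highest set bit sits at position 5
    # (the loop shifts right five times before the number reaches zero).
    print(get_highest_set_bit_position(25))  # 5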
| 71 | 0 |
from __future__ import annotations
def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    """Place one queen per row with a depth-first search, pruning collisions."""
    # Get the next row in the current board (possible_board) to fill with a queen
    row = len(possible_board)

    # If row is equal to the size of the board, there is a queen in every row of the
    # current board (possible_board), so it is a complete solution
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
        # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board])
        return

    # We iterate over each column in the row to find all possible results in each row
    for col in range(n):
        # We apply what we learned previously. First we check that in the current board
        # (possible_board) no other queen occupies the same column, because if one does
        # it means there is a collision in the vertical direction. Then we apply the
        # two formulas we learned before:
        #
        # 45º: y - x = b or 45: row - col = b
        # 135º: y + x = b or row + col = b.
        #
        # And we verify that the results of these two formulas do not already exist in
        # their respective variables (diagonal_right_collisions, diagonal_left_collisions).
        #
        # If any of these checks is True it means there is a collision, so we continue
        # to the next value in the for loop.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue

        # If it is False we call the dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )


def n_queens_solution(n: int) -> None:
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")

    print(len(boards), "solutions were found.")
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
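    # For n = 4 this prints the two classic solutions, corresponding to the
    # queen-column lists [1, 3, 0, 2] and [2, 0, 3, 1].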
| 637 |
'''simple docstring'''
from math import factorial
def combinations(n: int, k: int) -> int:
    """Return n choose k, the number of ways to pick k items from n."""
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))
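# Worked example: combinations(5, 2) == 5! // (2! * 3!) == 120 // (2 * 6) == 10.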
if __name__ == "__main__":
print(
"""The number of five-card hands possible from a standard""",
f"""fifty-two card deck is: {combinations(52, 5)}\n""",
)
print(
"""If a class of 40 students must be arranged into groups of""",
f"""4 for group projects, there are {combinations(40, 4)} ways""",
"""to arrange them.\n""",
)
print(
"""If 10 teams are competing in a Formula One race, there""",
f"""are {combinations(10, 3)} ways that first, second and""",
"""third place can be awarded.""",
)
| 71 | 0 |
'''simple docstring'''
def heaps(arr: list) -> list:
    """Return all permutations of ``arr`` as tuples, using Heap's algorithm."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return

        generate(k - 1, arr)

        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
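    # Worked example: heaps([1, 2, 3]) yields the six permutations in Heap's order:
    # (1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1).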
| 422 |
'''simple docstring'''
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class _snake_case (__SCREAMING_SNAKE_CASE , unittest.TestCase):
__A : Union[str, Any] =VideoToVideoSDPipeline
__A : Tuple =TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"}) - {"image", "width", "height"}
__A : Union[str, Any] =TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"}) - {"image"}
__A : str =PipelineTesterMixin.required_optional_params - {"latents"}
__A : Dict =False
# No `output_type`.
__A : Optional[int] =frozenset(
[
"num_inference_steps",
"generator",
"latents",
"return_dict",
"callback",
"callback_steps",
])
def UpperCamelCase__ ( self ):
torch.manual_seed(0 )
UpperCAmelCase_ : Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D") ,up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D") ,cross_attention_dim=32 ,attention_head_dim=4 ,)
UpperCAmelCase_ : int = DDIMScheduler(
beta_start=0.00085 ,beta_end=0.012 ,beta_schedule="scaled_linear" ,clip_sample=_snake_case ,set_alpha_to_one=_snake_case ,)
torch.manual_seed(0 )
UpperCAmelCase_ : Dict = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] ,up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] ,latent_channels=4 ,sample_size=1_28 ,)
torch.manual_seed(0 )
UpperCAmelCase_ : Dict = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=10_00 ,hidden_act="gelu" ,projection_dim=5_12 ,)
UpperCAmelCase_ : Union[str, Any] = CLIPTextModel(_snake_case )
UpperCAmelCase_ : List[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
UpperCAmelCase_ : Optional[int] = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
return components
def UpperCamelCase__ ( self ,_snake_case ,_snake_case=0 ):
# 3 frames
UpperCAmelCase_ : Dict = floats_tensor((1, 3, 3, 32, 32) ,rng=random.Random(_snake_case ) ).to(_snake_case )
if str(_snake_case ).startswith("mps" ):
UpperCAmelCase_ : Tuple = torch.manual_seed(_snake_case )
else:
UpperCAmelCase_ : Tuple = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
UpperCAmelCase_ : Union[str, Any] = {
"prompt": "A painting of a squirrel eating a burger",
"video": video,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "pt",
}
return inputs
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : str = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : Dict = self.get_dummy_components()
UpperCAmelCase_ : str = VideoToVideoSDPipeline(**_snake_case )
UpperCAmelCase_ : int = sd_pipe.to(_snake_case )
sd_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase_ : Tuple = self.get_dummy_inputs(_snake_case )
UpperCAmelCase_ : str = "np"
UpperCAmelCase_ : Dict = sd_pipe(**_snake_case ).frames
UpperCAmelCase_ : Tuple = frames[0][-3:, -3:, -1]
assert frames[0].shape == (32, 32, 3)
UpperCAmelCase_ : Dict = np.array([1_06, 1_17, 1_13, 1_74, 1_37, 1_12, 1_48, 1_51, 1_31] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() ,reason="XFormers attention is only available with CUDA and `xformers` installed" ,)
def UpperCamelCase__ ( self ):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=_snake_case ,expected_max_diff=5E-3 )
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def UpperCamelCase__ ( self ):
pass
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def UpperCamelCase__ ( self ):
pass
@unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline." )
def UpperCamelCase__ ( self ):
pass
def UpperCamelCase__ ( self ):
return super().test_progress_bar()
@slow
@skip_mps
class _snake_case (unittest.TestCase):
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Dict = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_XL" ,torch_dtype=torch.floataa )
pipe.enable_model_cpu_offload()
# 10 frames
UpperCAmelCase_ : str = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase_ : int = torch.randn((1, 10, 3, 10_24, 5_76) ,generator=_snake_case )
UpperCAmelCase_ : List[Any] = video.to("cuda" )
UpperCAmelCase_ : List[Any] = "Spiderman is surfing"
UpperCAmelCase_ : Optional[Any] = pipe(_snake_case ,video=_snake_case ,generator=_snake_case ,num_inference_steps=3 ,output_type="pt" ).frames
UpperCAmelCase_ : Any = np.array([-1.0458984, -1.1279297, -0.9663086, -0.91503906, -0.75097656] )
assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1E-2
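        # Condensed sketch of the slow-test flow above (model id and step count as in
        # the test; the input video tensor is random noise of shape (1, 10, 3, 1024, 576)):
        #   pipe = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_XL", torch_dtype=torch.float16)
        #   frames = pipe(prompt, video=video, generator=generator, num_inference_steps=3, output_type="pt").frames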
| 71 | 0 |
'''simple docstring'''
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration


@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.parametrize(
    "path, config_name, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)


@pytest.mark.parametrize(
    "path, expected",
    [
        ("squad", "plain_text"),
        ("acronym_identification", "default"),
        ("lhoestq/squad", "plain_text"),
        ("lhoestq/test", "default"),
        ("lhoestq/demo1", "lhoestq--demo1"),
        ("dalle-mini/wit", "dalle-mini--wit"),
    ],
)
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names


@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config",
    [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ],
)
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config


@pytest.mark.parametrize(
    "path, expected_config, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_split_names(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
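# Success-path example implied by the parametrizations above:
#   get_dataset_split_names("squad", config_name="plain_text")  # -> ["train", "validation"]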
| 330 |
import os

import pytest

from datasets import (
    get_dataset_config_info,
    get_dataset_config_names,
    get_dataset_infos,
    get_dataset_split_names,
    inspect_dataset,
    inspect_metric,
)

pytestmark = pytest.mark.integration


@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.parametrize(
    "path, config_name, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)


@pytest.mark.parametrize(
    "path, expected",
    [
        ("squad", "plain_text"),
        ("acronym_identification", "default"),
        ("lhoestq/squad", "plain_text"),
        ("lhoestq/test", "default"),
        ("lhoestq/demo1", "lhoestq--demo1"),
        ("dalle-mini/wit", "dalle-mini--wit"),
    ],
)
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names


@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config",
    [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ],
)
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config


@pytest.mark.parametrize(
    "path, expected_config, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_split_names(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
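A minimal usage sketch of the inspection API exercised by these tests (this assumes network access to the Hugging Face Hub; the printed values mirror the "squad" expectations in the parametrizations above):

from datasets import get_dataset_config_names, get_dataset_split_names
print(get_dataset_config_names("squad"))                           # ['plain_text']
print(get_dataset_split_names("squad", config_name="plain_text"))  # ['train', 'validation']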
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict, encoder_only=False):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if encoder_only and not key.startswith("head"):
            key = "segformer.encoder." + key
        if key.startswith("backbone"):
            key = key.replace("backbone", "segformer.encoder")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "segformer.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("segformer.encoder.layer_norm") + len("segformer.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if key.startswith("head"):
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict
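# Illustrative sanity check (not part of the conversion flow): the renaming maps
# the original 1-indexed stage suffixes onto 0-indexed HuggingFace module paths,
# e.g. rename_keys({"backbone.patch_embed1.proj.weight": w}) returns a dict whose
# key is "segformer.encoder.patch_embeddings.0.proj.weight".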
def read_in_k_v(state_dict, config):
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"segformer.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"segformer.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[
                config.hidden_sizes[i] :
            ]
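# Shape bookkeeping for the split above: the fused kv matrix has shape
# (2 * hidden_sizes[i], hidden_sizes[i]); rows [:hidden] become the key
# projection and rows [hidden:] the value projection, each (hidden, hidden).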
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_segformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    config = SegformerConfig()
    encoder_only = False
    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    if "segformer" in model_name:
        size = model_name[len("segformer.") : len("segformer.") + 2]
        if "ade" in model_name:
            config.num_labels = 150
            filename = "ade20k-id2label.json"
            expected_shape = (1, 150, 128, 128)
        elif "city" in model_name:
            config.num_labels = 19
            filename = "cityscapes-id2label.json"
            expected_shape = (1, 19, 128, 128)
        else:
            raise ValueError(f"Model {model_name} not supported")
    elif "mit" in model_name:
        encoder_only = True
        size = model_name[4:6]
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"
        expected_shape = (1, 1000)
    else:
        raise ValueError(f"Model {model_name} not supported")
    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "b0":
        pass
    elif size == "b1":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 256
    elif size == "b2":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 4, 6, 3]
    elif size == "b3":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 4, 18, 3]
    elif size == "b4":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 8, 27, 3]
    elif size == "b5":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 6, 40, 3]
    else:
        raise ValueError(f"Size {size} not supported")
    # load image processor (only resize + normalize)
    image_processor = SegformerImageProcessor(
        image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
    )
    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
    logger.info(f"Converting model {model_name}...")
    # load original state dict
    if encoder_only:
        state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))
    else:
        state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))["state_dict"]
    # rename keys
    state_dict = rename_keys(state_dict, encoder_only=encoder_only)
    if not encoder_only:
        del state_dict["decode_head.conv_seg.weight"]
        del state_dict["decode_head.conv_seg.bias"]
    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)
    # create HuggingFace model and load state dict
    if encoder_only:
        config.reshape_last_stage = False
        model = SegformerForImageClassification(config)
    else:
        model = SegformerForSemanticSegmentation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits
    # set expected_slice based on model name
    # ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-4.6_310, -5.5_232, -6.2_356], [-5.1_921, -6.1_444, -6.5_996], [-5.4_424, -6.2_790, -6.7_574]],
[[-12.1_391, -13.3_122, -13.9_554], [-12.8_732, -13.9_352, -14.3_563], [-12.9_438, -13.8_226, -14.2_513]],
[[-12.5_134, -13.4_686, -14.4_915], [-12.8_669, -14.4_343, -14.7_758], [-13.2_523, -14.5_819, -15.0_694]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-7.5_820, -8.7_231, -8.3_215], [-8.0_600, -10.3_529, -10.0_304], [-7.5_208, -9.4_103, -9.6_239]],
[[-12.6_918, -13.8_994, -13.7_137], [-13.3_196, -15.7_523, -15.4_789], [-12.9_343, -14.8_757, -14.9_689]],
[[-11.1_911, -11.9_421, -11.3_243], [-11.3_342, -13.6_839, -13.3_581], [-10.3_909, -12.1_832, -12.4_858]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-11.8_173, -14.3_850, -16.3_128], [-14.5_648, -16.5_804, -18.6_568], [-14.7_223, -15.7_387, -18.4_218]],
[[-15.7_290, -17.9_171, -19.4_423], [-18.3_105, -19.9_448, -21.4_661], [-17.9_296, -18.6_497, -20.7_910]],
[[-15.0_783, -17.0_336, -18.2_789], [-16.8_771, -18.6_870, -20.1_612], [-16.2_454, -17.1_426, -19.5_055]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-9.0_878, -10.2_081, -10.1_891], [-9.3_144, -10.7_941, -10.9_843], [-9.2_294, -10.3_855, -10.5_704]],
[[-12.2_316, -13.9_068, -13.6_102], [-12.9_161, -14.3_702, -14.3_235], [-12.5_233, -13.7_174, -13.7_932]],
[[-14.6_275, -15.2_490, -14.9_727], [-14.3_400, -15.9_687, -16.2_827], [-14.1_484, -15.4_033, -15.8_937]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-12.3_144, -13.2_447, -14.0_802], [-13.3_614, -14.5_816, -15.6_117], [-13.3_340, -14.4_433, -16.2_219]],
[[-19.2_781, -20.4_128, -20.7_506], [-20.6_153, -21.6_566, -22.0_998], [-19.9_800, -21.0_430, -22.1_494]],
[[-18.8_739, -19.7_804, -21.1_834], [-20.1_233, -21.6_765, -23.2_944], [-20.0_315, -21.2_641, -23.6_944]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
        expected_slice = torch.tensor(
[
[[-9.5_524, -12.0_835, -11.7_348], [-10.5_229, -13.6_446, -14.5_662], [-9.5_842, -12.8_851, -13.9_414]],
[[-15.3_432, -17.5_323, -17.0_818], [-16.3_330, -18.9_255, -19.2_101], [-15.1_340, -17.7_848, -18.3_971]],
[[-12.6_072, -14.9_486, -14.6_631], [-13.7_629, -17.0_907, -17.7_745], [-12.7_899, -16.1_695, -17.1_671]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-11.9_295, -13.4_057, -14.8_106], [-13.3_431, -14.8_179, -15.3_781], [-14.2_836, -15.5_942, -16.1_588]],
[[-11.4_906, -12.8_067, -13.6_564], [-13.1_189, -14.0_500, -14.1_543], [-13.8_748, -14.5_136, -14.8_789]],
[[0.5_374, 0.1_067, -0.4_742], [0.1_141, -0.2_255, -0.7_099], [-0.3_000, -0.5_924, -1.3_105]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-7.8_217, -9.8_767, -10.1_717], [-9.4_438, -10.9_058, -11.4_047], [-9.7_939, -12.3_495, -12.1_079]],
[[-7.1_514, -9.5_336, -10.0_860], [-9.7_776, -11.6_822, -11.8_439], [-10.1_411, -12.7_655, -12.8_972]],
[[0.3_021, 0.0_805, -0.2_310], [-0.0_328, -0.1_605, -0.2_714], [-0.1_408, -0.5_477, -0.6_976]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
        expected_slice = torch.tensor(
[
[
[-1.1372e01, -1.2787e01, -1.3477e01],
[-1.2536e01, -1.4194e01, -1.4409e01],
[-1.3217e01, -1.4888e01, -1.5327e01],
],
[
[-1.4791e01, -1.7122e01, -1.8277e01],
[-1.7163e01, -1.9192e01, -1.9533e01],
[-1.7897e01, -1.9991e01, -2.0315e01],
],
[
[7.6723e-01, 4.1921e-01, -7.7878e-02],
[4.7772e-01, 9.5557e-03, -2.8082e-01],
[3.6032e-01, -2.4826e-01, -5.1168e-01],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
        expected_slice = torch.tensor(
[
[[-9.4_959, -11.3_087, -11.7_479], [-11.0_025, -12.6_540, -12.3_319], [-11.4_064, -13.0_487, -12.9_905]],
[[-9.8_905, -11.3_084, -12.0_854], [-11.1_726, -12.7_698, -12.9_583], [-11.5_985, -13.3_278, -14.1_774]],
[[0.2_213, 0.0_192, -0.2_466], [-0.1_731, -0.4_213, -0.4_874], [-0.3_126, -0.6_541, -1.1_389]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-13.5_748, -13.9_111, -12.6_500], [-14.3_500, -15.3_683, -14.2_328], [-14.7_532, -16.0_424, -15.6_087]],
[[-17.1_651, -15.8_725, -12.9_653], [-17.2_580, -17.3_718, -14.8_223], [-16.6_058, -16.8_783, -16.7_452]],
[[-3.6_456, -3.0_209, -1.4_203], [-3.0_797, -3.1_959, -2.0_000], [-1.8_757, -1.9_217, -1.6_997]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-16.0_976, -16.4_856, -17.3_962], [-16.6_234, -19.0_342, -19.7_685], [-16.0_900, -18.0_661, -19.1_180]],
[[-18.4_750, -18.8_488, -19.5_074], [-19.4_030, -22.1_570, -22.5_977], [-19.1_191, -20.8_486, -22.3_783]],
[[-4.5_178, -5.5_037, -6.5_109], [-5.0_884, -7.2_174, -8.0_334], [-4.4_156, -5.8_117, -7.2_970]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-14.2_081, -14.4_732, -14.1_977], [-14.5_867, -16.4_423, -16.6_356], [-13.4_441, -14.9_685, -16.8_696]],
[[-14.4_576, -14.7_073, -15.0_451], [-15.0_816, -17.6_237, -17.9_873], [-14.4_213, -16.0_199, -18.5_992]],
[[-4.7_349, -4.9_588, -5.0_966], [-4.3_210, -6.9_325, -7.2_591], [-3.4_312, -4.7_484, -7.1_917]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-11.7_737, -11.9_526, -11.3_273], [-13.6_692, -14.4_574, -13.8_878], [-13.8_937, -14.6_924, -15.9_345]],
[[-14.6_706, -14.5_330, -14.1_306], [-16.1_502, -16.8_180, -16.4_269], [-16.8_338, -17.8_939, -20.1_746]],
[[1.0_491, 0.8_289, 1.0_310], [1.1_044, 0.5_219, 0.8_055], [1.0_899, 0.6_926, 0.5_590]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-12.5_641, -13.4_777, -13.0_684], [-13.9_587, -15.8_983, -16.6_557], [-13.3_109, -15.7_350, -16.3_141]],
[[-14.7_074, -15.4_352, -14.5_944], [-16.6_353, -18.1_663, -18.6_120], [-15.1_702, -18.0_329, -18.1_547]],
[[-1.7_990, -2.0_951, -1.7_784], [-2.6_397, -3.8_245, -3.9_686], [-1.5_264, -2.8_126, -2.9_316]],
] )
    else:
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])
    # verify logits
    if not encoder_only:
        assert logits.shape == expected_shape
        assert torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-2)
    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""segformer.b0.512x512.ade.160k""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
    args = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
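An example invocation of the script (the script file name and local paths are placeholders):

# python convert_segformer_original_to_pytorch.py \
#     --model_name segformer.b0.512x512.ade.160k \
#     --checkpoint_path ./segformer.b0.512x512.ade.160k.pth \
#     --pytorch_dump_folder_path ./segformer-b0-finetuned-ade-512-512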
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class CLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()
        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)
        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)
        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
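A short usage sketch of the processor behaviour these tests pin down (a sketch only: it assumes network access, and any CLIP checkpoint such as "openai/clip-vit-base-patch32" works; the blank image is a stand-in):

import numpy as np
from PIL import Image
from transformers import CLIPProcessor
processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
inputs = processor(text=["lower newer"], images=image, return_tensors="pt")
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']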
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class AbstractDatasetReader(ABC):
    def __init__(
        self,
        path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: Optional[str] = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        pass


class AbstractDatasetInputStream(ABC):
    def __init__(
        self,
        features: Optional[Features] = None,
        cache_dir: Optional[str] = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        pass
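A minimal concrete implementation of the input-stream ABC above; this is an illustrative sketch only (`InMemoryListReader` is a made-up name, and the real readers in `datasets.io` delegate to Arrow-backed builders):

from datasets import Dataset
class InMemoryListReader(AbstractDatasetInputStream):
    def __init__(self, data, features=None, **kwargs):
        super().__init__(features=features, **kwargs)
        self.data = data
    def read(self):
        # materialize the in-memory rows as a Dataset, honoring optional features
        return Dataset.from_list(self.data, features=self.features)
ds = InMemoryListReader([{"text": "hello"}, {"text": "world"}]).read()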
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
    SpeechT5HifiGan,
    SpeechT5HifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class AudioLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = AudioLDMPipeline
    params = TEXT_TO_AUDIO_PARAMS
    batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "num_waveforms_per_prompt",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=(32, 64),
            class_embed_type="simple_projection",
            projection_class_embeddings_input_dim=32,
            class_embeddings_concat=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=1,
            out_channels=1,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = ClapTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            projection_dim=32,
        )
        text_encoder = ClapTextModelWithProjection(text_encoder_config)
        tokenizer = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta", model_max_length=77)
        vocoder_config = SpeechT5HifiGanConfig(
            model_in_dim=8,
            sampling_rate=16000,
            upsample_initial_channel=16,
            upsample_rates=[2, 2],
            upsample_kernel_sizes=[4, 4],
            resblock_kernel_sizes=[3, 7],
            resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]],
            normalize_before=False,
        )
        vocoder = SpeechT5HifiGan(vocoder_config)
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "vocoder": vocoder,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
        }
        return inputs

    def test_audioldm_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(**inputs)
        audio = output.audios[0]
        assert audio.ndim == 1
        assert len(audio) == 256
        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033]
        )
        assert np.abs(audio_slice - expected_slice).max() < 1e-2

    def test_audioldm_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]
        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]
        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]
        text_inputs = audioldm_pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=audioldm_pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        text_inputs = text_inputs["input_ids"].to(torch_device)
        prompt_embeds = audioldm_pipe.text_encoder(text_inputs)
        prompt_embeds = prompt_embeds.text_embeds
        # additional L_2 normalization over each hidden-state
        prompt_embeds = F.normalize(prompt_embeds, dim=-1)
        inputs["prompt_embeds"] = prompt_embeds
        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]
        assert np.abs(audio_1 - audio_2).max() < 1e-2

    def test_audioldm_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]
        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]
        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]
        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = audioldm_pipe.tokenizer(
                p,
                padding="max_length",
                max_length=audioldm_pipe.tokenizer.model_max_length,
                truncation=True,
                return_tensors="pt",
            )
            text_inputs = text_inputs["input_ids"].to(torch_device)
            text_embeds = audioldm_pipe.text_encoder(text_inputs)
            text_embeds = text_embeds.text_embeds
            # additional L_2 normalization over each hidden-state
            text_embeds = F.normalize(text_embeds, dim=-1)
            embeds.append(text_embeds)
        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds
        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]
        assert np.abs(audio_1 - audio_2).max() < 1e-2

    def test_audioldm_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        negative_prompt = "egg cracking"
        output = audioldm_pipe(**inputs, negative_prompt=negative_prompt)
        audio = output.audios[0]
        assert audio.ndim == 1
        assert len(audio) == 256
        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032]
        )
        assert np.abs(audio_slice - expected_slice).max() < 1e-2

    def test_audioldm_num_waveforms_per_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        prompt = "A hammer hitting a wooden surface"
        # test num_waveforms_per_prompt=1 (default)
        audios = audioldm_pipe(prompt, num_inference_steps=2).audios
        assert audios.shape == (1, 256)
        # test num_waveforms_per_prompt=1 (default) for batch of prompts
        batch_size = 2
        audios = audioldm_pipe([prompt] * batch_size, num_inference_steps=2).audios
        assert audios.shape == (batch_size, 256)
        # test num_waveforms_per_prompt for single prompt
        num_waveforms_per_prompt = 2
        audios = audioldm_pipe(prompt, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt).audios
        assert audios.shape == (num_waveforms_per_prompt, 256)
        # test num_waveforms_per_prompt for batch of prompts
        batch_size = 2
        audios = audioldm_pipe(
            [prompt] * batch_size, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt
        ).audios
        assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)

    def test_audioldm_audio_length_in_s(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        vocoder_sampling_rate = audioldm_pipe.vocoder.config.sampling_rate
        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(audio_length_in_s=0.016, **inputs)
        audio = output.audios[0]
        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.016
        output = audioldm_pipe(audio_length_in_s=0.032, **inputs)
        audio = output.audios[0]
        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.032

    def test_audioldm_vocoder_model_in_dim(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        prompt = ["hey"]
        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        assert audio_shape == (1, 256)
        config = audioldm_pipe.vocoder.config
        config.model_in_dim *= 2
        audioldm_pipe.vocoder = SpeechT5HifiGan(config).to(torch_device)
        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
        assert audio_shape == (1, 256)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(test_mean_pixel_difference=False)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False)


@slow
class AudioLDMPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 8, 128, 16))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 2.5,
        }
        return inputs

    def test_audioldm(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        inputs["num_inference_steps"] = 25
        audio = audioldm_pipe(**inputs).audios[0]
        assert audio.ndim == 1
        assert len(audio) == 81920
        audio_slice = audio[77230:77240]
        expected_slice = np.array(
            [-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315]
        )
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 1e-2

    def test_audioldm_lms(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe.scheduler = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        audio = audioldm_pipe(**inputs).audios[0]
        assert audio.ndim == 1
        assert len(audio) == 81920
        audio_slice = audio[27780:27790]
        expected_slice = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212])
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 3e-2
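For context, an end-user sketch of the pipeline these tests cover (assumes a CUDA GPU and network access; "cvssp/audioldm" is the checkpoint used by the slow tests above):

import torch
from diffusers import AudioLDMPipeline
pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm", torch_dtype=torch.float16).to("cuda")
audio = pipe("A hammer hitting a wooden surface", num_inference_steps=10, audio_length_in_s=5.0).audios[0]
# `audio` is a 1-D numpy waveform sampled at pipe.vocoder.config.sampling_rate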
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
SAMPLE_TEXT = """Hello world! cécé herlolip"""
BertAbsConfig = namedtuple(
"""BertAbsConfig""",
[
"""temp_dir""",
"""large""",
"""use_bert_emb""",
"""finetune_bert""",
"""encoder""",
"""share_emb""",
"""max_pos""",
"""enc_layers""",
"""enc_hidden_size""",
"""enc_heads""",
"""enc_ff_size""",
"""enc_dropout""",
"""dec_layers""",
"""dec_hidden_size""",
"""dec_heads""",
"""dec_ff_size""",
"""dec_dropout""",
],
)
def convert_bertabs_checkpoints(path_to_checkpoints, dump_path):
    """Copy/paste and tweak the pre-trained weights to the HuggingFace implementation."""
    # Instantiate the authors' model with the pre-trained weights
    config = BertAbsConfig(
        temp_dir=".",
        finetune_bert=False,
        large=False,
        share_emb=True,
        use_bert_emb=False,
        encoder="bert",
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
    )
    checkpoints = torch.load(path_to_checkpoints, lambda storage, loc: storage)
    original = AbsSummarizer(config, torch.device("cpu"), checkpoints)
    original.eval()
    new_model = BertAbsSummarizer(config, torch.device("cpu"))
    new_model.eval()
    # -------------------
    # Convert the weights
    # -------------------
    logging.info("convert the model")
    new_model.bert.load_state_dict(original.bert.state_dict())
    new_model.decoder.load_state_dict(original.decoder.state_dict())
    new_model.generator.load_state_dict(original.generator.state_dict())
    # ----------------------------------
    # Make sure the outputs are identical
    # ----------------------------------
    logging.info("Make sure that the models' outputs are identical")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    # prepare the model inputs
    encoder_input_ids = tokenizer.encode("This is sample éàalj'-.")
    encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(encoder_input_ids)))
    encoder_input_ids = torch.tensor(encoder_input_ids).unsqueeze(0)
    decoder_input_ids = tokenizer.encode("This is sample 3 éàalj'-.")
    decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(decoder_input_ids)))
    decoder_input_ids = torch.tensor(decoder_input_ids).unsqueeze(0)
    # failsafe to make sure the weights reset does not affect the
    # loaded weights.
    assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight)) == 0
    # forward pass
    src = encoder_input_ids
    tgt = decoder_input_ids
    segs = token_type_ids = None
    clss = None
    mask_src = encoder_attention_mask = None
    mask_tgt = decoder_attention_mask = None
    mask_cls = None
    # The original model does not apply the generator layer immediately but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
    # We make sure that the outputs of the full stack are identical.
    output_original_model = original(src, tgt, segs, clss, mask_src, mask_tgt, mask_cls)[0]
    output_original_generator = original.generator(output_original_model)
    output_converted_model = new_model(
        encoder_input_ids, decoder_input_ids, token_type_ids, encoder_attention_mask, decoder_attention_mask
    )[0]
    output_converted_generator = new_model.generator(output_converted_model)
    maximum_absolute_difference = torch.max(torch.abs(output_converted_model - output_original_model)).item()
    print("Maximum absolute difference between weights: {:.2f}".format(maximum_absolute_difference))
    maximum_absolute_difference = torch.max(torch.abs(output_converted_generator - output_original_generator)).item()
    print("Maximum absolute difference between weights: {:.2f}".format(maximum_absolute_difference))
    are_identical = torch.allclose(output_converted_model, output_original_model, atol=1e-3)
    if are_identical:
        logging.info("all weights are equal up to 1e-3")
    else:
        raise ValueError("the weights are different. The new model is likely different from the original one.")
    # The model has been saved with torch.save(model) and this is bound to the exact
    # directory structure. We save the state_dict instead.
    logging.info("saving the model's state dictionary")
    torch.save(
        new_model.state_dict(), "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin"
    )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--bertabs_checkpoint_path""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch dump.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the output PyTorch model.""",
)
    args = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
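An example invocation (the script file name and checkpoint path are placeholders):

# python convert_bertabs_original_pytorch_checkpoint.py \
#     --bertabs_checkpoint_path ./bertabs_cnndm_final.pt \
#     --pytorch_dump_folder_path ./bertabs-finetuned-cnndm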
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {
"""configuration_gpt_neox_japanese""": ["""GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoXJapaneseConfig"""],
"""tokenization_gpt_neox_japanese""": ["""GPTNeoXJapaneseTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
"""GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoXJapaneseForCausalLM""",
"""GPTNeoXJapaneseLayer""",
"""GPTNeoXJapaneseModel""",
"""GPTNeoXJapanesePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
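A brief note on what the lazy pattern buys (the import path is this package's real one; the snippet is illustrative):

# Importing the package stays cheap: `_LazyModule` defers loading of the heavy
# modeling/tokenization submodules until an attribute is first accessed, e.g.
#
#     from transformers.models.gpt_neox_japanese import GPTNeoXJapaneseConfig
#     # only at this point is configuration_gpt_neox_japanese actually imported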
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")
    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class UnCLIPScheduler(SchedulerMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        variance_type: str = "fixed_small_log",
        clip_sample: bool = True,
        clip_sample_range: Optional[float] = 1.0,
        prediction_type: str = "epsilon",
        beta_schedule: str = "squaredcos_cap_v2",
    ):
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'")
        self.betas = betas_for_alpha_bar(num_train_timesteps)
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        self.one = torch.tensor(1.0)
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())
        self.variance_type = variance_type
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None):
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
    def _get_variance(self, t, prev_timestep=None, predicted_variance=None, variance_type=None):
        if prev_timestep is None:
            prev_timestep = t - 1
        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev
        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = beta_prod_t_prev / beta_prod_t * beta
        if variance_type is None:
            variance_type = self.config.variance_type
        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance, min=1e-20))
            variance = torch.exp(0.5 * variance)
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log
        return variance
    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        prev_timestep: Optional[int] = None,
        generator=None,
        return_dict: bool = True,
    ):
        t = timestep
        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
        else:
            predicted_variance = None
        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1
        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev
        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta
        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"
                " for the UnCLIPScheduler."
            )
        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(
                pred_original_sample, -self.config.clip_sample_range, self.config.clip_sample_range
            )
        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(
                model_output.shape, dtype=model_output.dtype, generator=generator, device=model_output.device
            )
            variance = self._get_variance(
                t, predicted_variance=predicted_variance, prev_timestep=prev_timestep
            )
            if self.variance_type == "fixed_small_log":
                variance = variance
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp()
            else:
                raise ValueError(
                    f"variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"
                    " for the UnCLIPScheduler."
                )
            variance = variance * variance_noise
        pred_prev_sample = pred_prev_sample + variance
        if not return_dict:
            return (pred_prev_sample,)
        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.IntTensor,
    ):
        # Make sure alphas_cumprod and timestep have same device and dtype as original_samples
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
        timesteps = timesteps.to(original_samples.device)
        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)
        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)
        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
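For reference, the quantities assembled in `_get_variance` and `step` are the standard DDPM posterior terms (Ho et al. 2020, Eqs. (6) and (7)); writing $\bar{\alpha}_t$ for `alphas_cumprod[t]`:

$$\tilde{\beta}_t = \frac{1 - \bar{\alpha}_{t-1}}{1 - \bar{\alpha}_t}\,\beta_t, \qquad \mu_t = \frac{\sqrt{\bar{\alpha}_{t-1}}\,\beta_t}{1 - \bar{\alpha}_t}\,\hat{x}_0 + \frac{\sqrt{\alpha_t}\,(1 - \bar{\alpha}_{t-1})}{1 - \bar{\alpha}_t}\,x_t,$$

which correspond to `variance`, `pred_original_sample_coeff` and `current_sample_coeff` in the code above.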
import heapq


def greedy_min_vertex_cover(graph: dict) -> set[int]:
    """
    Greedy APX-Algorithm for min Vertex Cover.
    @input: graph (stores the adjacency list of each vertex)
    @example:
    >>> graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    >>> greedy_min_vertex_cover(graph)
    {0, 1, 2, 4}
    """
    queue: list[list] = []
    # for each node and his adjacency list add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)
        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v hasn't any adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacency list and update his rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    print(f"Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}")
import json
import os
import re
import sys
import urllib.request

import requests
from bs4 import BeautifulSoup

headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
    " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
}


def download_images_from_google_query(query: str = "dhaka", max_images: int = 5) -> int:
    """Search Google Images for `query` and download up to `max_images` results."""
    max_images = min(max_images, 50)  # Prevent abuse!
    params = {
        "q": query,
        "tbm": "isch",
        "hl": "en",
        "ijn": "0",
    }
    html = requests.get("https://www.google.com/search", params=params, headers=headers)
    soup = BeautifulSoup(html.text, "html.parser")
    matched_images_data = "".join(
        re.findall(r"AF_initDataCallback\(([^<]+)\);", str(soup.select("script")))
    )
    matched_images_data_fix = json.dumps(matched_images_data)
    matched_images_data_json = json.loads(matched_images_data_fix)
    matched_google_image_data = re.findall(
        r"\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",",
        matched_images_data_json,
    )
    if not matched_google_image_data:
        return 0
    removed_matched_google_images_thumbnails = re.sub(
        r"\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]",
        "",
        str(matched_google_image_data),
    )
    matched_google_full_resolution_images = re.findall(
        r"(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]",
        removed_matched_google_images_thumbnails,
    )
    for index, fixed_full_res_image in enumerate(matched_google_full_resolution_images):
        if index >= max_images:
            return index
        original_size_img_not_fixed = bytes(fixed_full_res_image, "ascii").decode(
            "unicode-escape"
        )
        original_size_img = bytes(original_size_img_not_fixed, "ascii").decode(
            "unicode-escape"
        )
        opener = urllib.request.build_opener()
        opener.addheaders = [
            (
                "User-Agent",
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
                " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582",
            )
        ]
        urllib.request.install_opener(opener)
        path_name = f"query_{query.replace(' ', '_')}"
        if not os.path.exists(path_name):
            os.makedirs(path_name)
        urllib.request.urlretrieve(  # noqa: S310
            original_size_img, f"{path_name}/original_size_img_{index}.jpg"
        )
    return index


if __name__ == "__main__":
    try:
        image_count = download_images_from_google_query(sys.argv[1])
        print(f"{image_count} images were downloaded to disk.")
    except IndexError:
        print("Please provide a search term.")
        raise
| 577 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    """Normalize the input to a batch of videos, i.e. a list of lists of frames."""
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f"Could not make batched video from {videos}")
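
# A minimal sketch of the shapes `make_batched` accepts ("frame" here stands
# for any `is_valid_image` input, e.g. a PIL image or a numpy array):
#
#     make_batched(frame)            # -> [[frame]]          a single image
#     make_batched([frame, frame])   # -> [[frame, frame]]   a single video
#     make_batched([[frame]])        # -> [[frame]]          already a batch of videos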
class VivitImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        offset: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image, size, resample=PILImageResampling.BILINEAR, data_format=None, **kwargs):
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, offset: bool = True, data_format=None, **kwargs):
        image = image.astype(np.float32)
        if offset:
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(
        self,
        image,
        do_resize=None,
        size=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        offset=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        data_format=ChannelDimension.FIRST,
    ):
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)

        if do_center_crop:
            image = self.center_crop(image, size=crop_size)

        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset)

        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)

        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(
        self,
        videos: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    offset=offset,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
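

if __name__ == "__main__":
    # Hedged usage sketch (class name reconstructed above): one video of eight
    # random uint8 frames in, a (1, 8, 3, 224, 224) float batch out, assuming
    # the default shortest_edge=256 resize and 224x224 center crop.
    processor = VivitImageProcessor()
    video = [np.random.randint(0, 256, (360, 640, 3), dtype=np.uint8) for _ in range(8)]
    batch = processor(video, return_tensors="np")  # BaseImageProcessor instances are callable
    print(batch["pixel_values"].shape)  # expected: (1, 8, 3, 224, 224)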
| 71 | 0 |
from math import sqrt


def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (number >= 0), "'number' must be an int and positive"
    status = True
    # 0 and 1 are not primes.
    if number <= 1:
        status = False
    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' is divisible by 'divisor' then set 'status'
        # to False and break out of the loop.
        if number % divisor == 0:
            status = False
            break
    # precondition
    assert isinstance(status, bool), "'status' must be of type bool"
    return status


def sieve_er(n: int):
    assert isinstance(n, int) and (n > 2), "'N' must be an int and > 2"
    # begin_list: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))
    ans = []  # this list will be returned.
    # actual sieve of Eratosthenes
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0
    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]
    # precondition
    assert isinstance(ans, list), "'ans' must be of type list"
    return ans


def get_prime_numbers(n: int):
    assert isinstance(n, int) and (n > 2), "'N' must be an int and > 2"
    ans = []
    # iterates over all numbers between 2 up to N+1
    # if a number is prime then append it to the list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must be of type list"
    return ans


def prime_factorization(number: int):
    assert isinstance(number, int) and number >= 0, "'number' must be an int and >= 0"
    ans = []  # this list will be returned by the function.
    # potential prime number factors.
    factor = 2
    quotient = number
    if number == 0 or number == 1:
        ans.append(number)
    # if 'number' is not prime then build the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient //= factor
            else:
                factor += 1
    else:
        ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must be of type list"
    return ans


def greatest_prime_factor(number: int) -> int:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must be of type int"
    return ans


def smallest_prime_factor(number: int) -> int:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must be of type int"
    return ans


def is_even(number: int) -> bool:
    assert isinstance(number, int), "'number' must be an int"
    assert isinstance(number % 2 == 0, bool), "compare must be of type bool"
    return number % 2 == 0


def is_odd(number: int) -> bool:
    assert isinstance(number, int), "'number' must be an int"
    assert isinstance(number % 2 != 0, bool), "compare must be of type bool"
    return number % 2 != 0


def goldbach(number: int):
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must be an int, even and > 2"
    ans = []  # this list will be returned
    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)
    # run variable for while-loops.
    i = 0
    j = None
    # exit variable. for break up the loops
    loop = True
    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1
    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contain two primes whose sum equals 'number'"
    return ans


def gcd(number1: int, number2: int) -> int:
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must be positive integers."
    rest = 0
    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest
    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must be of type int and positive"
    return number1


def kg_v(number1: int, number2: int) -> int:
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must be positive integers."
    ans = 1  # actual answer that will be returned.
    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)
    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)
    count1 = 0
    count2 = 0
    done = []  # captures numbers present in both 'prime_fac_1' and 'prime_fac_2'
    # iterates through prime_fac_1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)
                for _ in range(max(count1, count2)):
                    ans *= n
            else:
                count1 = prime_fac_1.count(n)
                for _ in range(count1):
                    ans *= n
            done.append(n)
    # iterates through prime_fac_2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)
            for _ in range(count2):
                ans *= n
            done.append(n)
    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must be of type int and positive"
    return ans


def get_prime(n: int) -> int:
    assert isinstance(n, int) and (n >= 0), "'number' must be a positive int"
    index = 0
    ans = 2  # this variable holds the answer
    while index < n:
        index += 1
        ans += 1  # counts to the next number
        # if ans is not prime then
        # run to the next prime number.
        while not is_prime(ans):
            ans += 1
    # precondition
    assert isinstance(ans, int) and is_prime(
        ans), "'ans' must be a prime number and of type int"
    return ans


def get_primes_between(p_number_1: int, p_number_2: int):
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must be prime numbers and 'pNumber1' < 'pNumber2'"
    number = p_number_1 + 1  # jump to the next number
    ans = []  # this list will be returned.
    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1
    while number < p_number_2:
        ans.append(number)
        number += 1
        # fetch the next prime number.
        while not is_prime(number):
            number += 1
    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must be a list without the arguments"
    # 'ans' contains neither 'pNumber1' nor 'pNumber2' !
    return ans


def get_divisors(n: int):
    assert isinstance(n, int) and (n >= 1), "'n' must be an int and >= 1"
    ans = []  # will be returned.
    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)
    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"
    return ans


def is_perfect_number(number: int) -> bool:
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must be an int and > 1"
    divisors = get_divisors(number)
    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"
    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number


def simplify_fraction(numerator: int, denominator: int):
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must be of type int and 'denominator' != 0"
    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))
    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"
    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)


def factorial(n: int) -> int:
    assert isinstance(n, int) and (n >= 0), "'n' must be an int and >= 0"
    ans = 1  # this will be returned.
    for factor in range(1, n + 1):
        ans *= factor
    return ans


def fib(n: int) -> int:
    assert isinstance(n, int) and (n >= 0), "'n' must be an int and >= 0"
    tmp = 0
    fib_1 = 1
    ans = 1  # this will be returned
    for _ in range(n - 1):
        tmp = ans
        ans += fib_1
        fib_1 = tmp
    return ans
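

if __name__ == "__main__":
    # A few hedged sanity checks for the helpers above (function names as
    # reconstructed in this file): 2^5 * 7 factorizes as expected, gcd/kg_v
    # agree on 24 and 36, and 28 is a perfect number.
    print(prime_factorization(224))   # expected: [2, 2, 2, 2, 2, 7]
    print(gcd(24, 36), kg_v(24, 36))  # expected: 12 72
    print(is_perfect_number(28))      # expected: True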
| 377 |
'''simple docstring'''
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths,
        split=None,
        features=None,
        cache_dir=None,
        keep_in_memory=False,
        streaming=False,
        field=None,
        num_proc=None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs)
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, field=field, **kwargs)

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc,)
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset


class JsonDatasetWriter:
    def __init__(
        self,
        dataset,
        path_or_buf,
        batch_size=None,
        num_proc=None,
        **to_json_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = "utf-8"
        self.to_json_kwargs = to_json_kwargs

    def write(self):
        _ = self.to_json_kwargs.pop("path_or_buf", None)
        orient = self.to_json_kwargs.pop("orient", "records")
        lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False)
        index = self.to_json_kwargs.pop("index", False if orient in ["split", "table"] else True)
        compression = self.to_json_kwargs.pop("compression", None)

        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"`datasets` currently does not support {compression} compression")

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, "wb", compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f"The compression parameter is not supported when writing to a buffer, but compression={compression}"
                    " was passed. Please provide a local path instead.")
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        return written

    def _batch_json(self, args):
        offset, orient, lines, index, to_json_kwargs = args

        batch = query_table(
            table=self.dataset.data, key=slice(offset, offset + self.batch_size), indices=self.dataset._indices,)
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs)
        if not json_str.endswith("\n"):
            json_str += "\n"
        return json_str.encode(self.encoding)

    def _write(self, file_obj, orient, lines, index, **to_json_kwargs):
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size), unit="ba", disable=not logging.is_progress_bar_enabled(), desc="Creating json from Arrow format",):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json, [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)],), total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size, unit="ba", disable=not logging.is_progress_bar_enabled(), desc="Creating json from Arrow format",):
                    written += file_obj.write(json_str)
        return written
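

# A hedged usage sketch of the pair above via the public `datasets` API that
# wraps them (JsonDatasetReader backs `Dataset.from_json`, JsonDatasetWriter
# backs `Dataset.to_json`):
#
#     from datasets import Dataset
#     ds = Dataset.from_dict({"a": [1, 2, 3]})
#     ds.to_json("out.jsonl", lines=True)   # JsonDatasetWriter.write()
#     ds2 = Dataset.from_json("out.jsonl")  # JsonDatasetReader.read()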
| 71 | 0 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class TFDeiTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFDeiTModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = TFDeiTForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size))

        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForMaskedImageModeling(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFDeiTForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFDeiTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDeiTModel,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDeiTModel,
            "image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
        }
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Dense))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call).parameters:
                del inputs_dict["labels"]

        return inputs_dict

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class DeiTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-1.0266, 0.1912, -1.2861])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 155 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class ASTFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])


class Speech2TextFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])
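
# Hedged note (class names reconstructed from transformers' dummy speech
# objects): instantiating either class raises an ImportError via
# `requires_backends`, telling the user to install the missing `speech` extra:
#
#     ASTFeatureExtractor()  # -> ImportError: ... requires the torchaudio library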
| 71 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCAmelCase = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ["FNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ["FNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_fnet"] = [
"FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FNetForMaskedLM",
"FNetForMultipleChoice",
"FNetForNextSentencePrediction",
"FNetForPreTraining",
"FNetForQuestionAnswering",
"FNetForSequenceClassification",
"FNetForTokenClassification",
"FNetLayer",
"FNetModel",
"FNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 329 |
'''simple docstring'''
def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> tuple[float, float]:
    """
    Solves the system of linear equations
        a1*x + b1*y = c1
        a2*x + b2*y = c2
    with Cramer's rule, where each equation is given as the list [a, b, c].
    """
    if not len(equation1) == len(equation2) == 3:
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        else:
            raise ValueError("No solution. (Inconsistent system)")
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
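

if __name__ == "__main__":
    # Hedged demo (function name reconstructed above): the system
    #   x + 2y = 5
    #   3x - y = 1
    # has the unique solution (1, 2).
    print(cramers_rule_2x2([1, 2, 5], [3, -1, 1]))  # (1.0, 2.0)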
| 71 | 0 |
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
__a : List[str] = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("importlib_metadata")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def dep_version_check(pkg, hint=None):
    """Check the installed version of `pkg` against the pin recorded in `deps`."""
    require_version(deps[pkg], hint)
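
# Hedged usage sketch: `dep_version_check` (name reconstructed above) just looks
# up the pinned range in `deps` and defers to `require_version`, e.g.
#
#     dep_version_check("tqdm")          # ok iff the installed tqdm matches the pin
#     require_version("numpy>=1.17")     # the underlying primitive it calls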
| 637 |
'''simple docstring'''
from statistics import mean, stdev
def normalization(data: list, ndigits: int = 3) -> list:
    """Return `data` rescaled to the closed interval [0, 1] (min-max normalization)."""
    x_min = min(data)
    x_max = max(data)
    # normalize data
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]


def standardization(data: list, ndigits: int = 3) -> list:
    """Return `data` rescaled to zero mean and unit sample standard deviation."""
    mu = mean(data)
    sigma = stdev(data)
    # standardize data
    return [round((x - mu) / (sigma), ndigits) for x in data]
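

if __name__ == "__main__":
    # Quick check of both rescalings on the same sample (function names as
    # reconstructed above): the sample mean is 20 and the sample stdev is 10.
    data = [10.0, 20.0, 30.0]
    print(normalization(data))     # [0.0, 0.5, 1.0]
    print(standardization(data))   # [-1.0, 0.0, 1.0]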
| 71 | 0 |
'''simple docstring'''
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class DiTIntegrationTest(unittest.TestCase):
    @slow
    def test_for_image_classification(self):
        image_processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model.to(torch_device)

        from datasets import load_dataset

        dataset = load_dataset("nielsr/rvlcdip-demo")

        image = dataset["train"][0]["image"].convert("RGB")

        inputs = image_processor(image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            logits = outputs.logits

        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347],
            device=torch_device,
            dtype=torch.float,
        )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
| 422 |
'''simple docstring'''
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Creates a set of `DataLoader`s for the glue dataset, using "bert-base-cased" as the tokenizer."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")
    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"])
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size)
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_lowerCamelCase = mocked_dataloaders # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=_SCREAMING_SNAKE_CASE )
def inner_training_loop(_SCREAMING_SNAKE_CASE : List[str] ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
        set_seed(seed)
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs))

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
# Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions, references=references)

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.", )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
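
# A minimal hedged sketch of the `find_executable_batch_size` contract used in
# `inner_training_loop` above: the decorated function must take `batch_size` as
# its first argument, is called with no arguments, and on a CUDA out-of-memory
# error the wrapper halves the batch size and retries:
#
#     @find_executable_batch_size(starting_batch_size=128)
#     def train(batch_size):
#         ...  # OOMs at 128, succeeds once the wrapper retries with 64
#
#     train()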
| 71 | 0 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def replace_key_with_offset(key, offset, original_name, new_name):
    """Replaces the key by subtracting the offset from the original layer number."""
    to_find = original_name.split(".")[0]
    key_list = key.split(".")
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset

    key = key.replace(f"{orig_block_num}.{layer_num}.{original_name}", f"block.{new_block_num}.{layer_num}.{new_name}")
    return key
def rename_keys(state_dict):
    """Renames the keys of the original PoolFormer checkpoint to the HF convention."""
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("network"):
            key = key.replace("network", "poolformer.encoder")
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith("bias") and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("proj")]
            key = key.replace(to_replace, f"patch_embeddings.{total_embed_found}.")
            key = key.replace("proj", "projection")
            if key.endswith("bias"):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = "poolformer.encoder." + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc1", "output.conv1")
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc2", "output.conv2")
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm1", "before_norm")
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm2", "after_norm")
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_1", "layer_scale_1")
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_2", "layer_scale_2")
        if "head" in key:
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict
def prepare_img():
    """We will verify our results on an image of cute cats."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    """Copy/paste/tweak the model's weights to our PoolFormer structure."""
    config = PoolFormerConfig()

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    size = model_name[-3:]
    config.num_labels = 1000
    filename = "imagenet-1k-id2label.json"
    expected_shape = (1, 1000)

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f"Size {size} not supported")

    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)

    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    pixel_values = image_processor(images=prepare_img(), return_tensors="pt").pixel_values

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits

    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(f"Size {size} not supported")

    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''poolformer_s12''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
    args = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
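
# Example invocation (hedged: the script filename and all paths below are
# placeholders; only the flags are defined by the argparse setup above):
#
#   python convert_poolformer_original_to_pytorch.py \
#       --model_name poolformer_s12 \
#       --checkpoint_path poolformer_s12.pth.tar \
#       --pytorch_dump_folder_path ./poolformer_s12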
| 330 |
'''simple docstring'''
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    """
    Returns the prime factors of `n` in ascending order.

    >>> prime_factors(100)
    [2, 2, 5, 5]
    >>> prime_factors(2560)
    [2, 2, 2, 2, 2, 2, 2, 2, 2, 5]
    """
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors


if __name__ == "__main__":
    import doctest

    doctest.testmod()
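
    # Hedged extra check (function name as reconstructed above): 700 = 2^2 * 5^2 * 7.
    print(prime_factors(700))  # [2, 2, 5, 5, 7]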
| 71 | 0 |
'''simple docstring'''
from math import factorial
def combinations(n: int, k: int) -> int:
    """Returns C(n, k), the number of k-element subsets of an n-element set."""
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))
if __name__ == "__main__":
print(
"""The number of five-card hands possible from a standard""",
F"""fifty-two card deck is: {combinations(52, 5)}\n""",
)
print(
"""If a class of 40 students must be arranged into groups of""",
F"""4 for group projects, there are {combinations(40, 4)} ways""",
"""to arrange them.\n""",
)
print(
"""If 10 teams are competing in a Formula One race, there""",
F"""are {combinations(10, 3)} ways that first, second and""",
"""third place can be awarded.""",
)
| 5 |
'''simple docstring'''
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_lowerCamelCase = """\
@misc{wu2016googles,
title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
_lowerCamelCase = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""
_lowerCamelCase = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
'google_bleu': google_bleu score
Examples:
Example 1:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.44
Example 2:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.61
Example 3:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results[\"google_bleu\"], 2))
0.53
Example 4:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
    >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2, max_len=6)
>>> print(round(results[\"google_bleu\"], 2))
0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class _snake_case (datasets.Metric):
def UpperCamelCase__ ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" ,id="token" ) ,id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" ,id="token" ) ,id="sequence" ) ,id="references" ),
} ) ,)
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case = 1 ,_snake_case = 4 ,):
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=_snake_case ,hypotheses=_snake_case ,min_len=_snake_case ,max_len=_snake_case )
}
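The metric class above is a thin wrapper; the same score can be obtained from NLTK directly. A minimal sketch with toy tokenized inputs:

from nltk.translate import gleu_score

hyp = ["he", "read", "the", "book"]
ref = ["he", "read", "the", "book", "yesterday"]
score = gleu_score.corpus_gleu(list_of_references=[[ref]], hypotheses=[hyp], min_len=1, max_len=4)
print(score)  # a float in [0, 1]; here precision is perfect, recall is not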
| 71 | 0 |
from ..utils import DummyObject, requires_backends
class A_ ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a__ = ["keras_nlp"]
def __init__(self , *lowercase__ , **lowercase__ ) -> List[Any]:
requires_backends(self , ['''keras_nlp'''] )
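This is the standard lazy-import guard used across the library: the placeholder class lets the package import cleanly when the optional keras_nlp backend is absent, and requires_backends raises a descriptive error only at instantiation time.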
| 303 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
_lowerCamelCase = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"})
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether to freeze the encoder."})
    freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."})


@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."})
    task: Optional[str] = field(
        default="summarization", metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"}, )
    max_source_length: Optional[int] = field(
        default=1024, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    max_target_length: Optional[int] = field(
        default=128, metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    val_max_target_length: Optional[int] = field(
        default=142, metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        }, )
    test_max_target_length: Optional[int] = field(
        default=142, metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."})
    n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."})
    n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."})
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."})
    tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."})
    eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."})
    ignore_pad_token_for_loss: bool = field(
        default=True, metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."}, )
def handle_metrics(split: str, metrics: dict, output_dir: str) -> None:
    """Log metrics for a split and save them as JSON in output_dir."""
    logger.info(f"***** {split} metrics *****")
    for key in sorted(metrics.keys()):
        logger.info(f"  {key} = {metrics[key]}")
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))
def main():
    """Fine-tune a seq2seq model: parse args, build model and datasets, run train/eval/predict."""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    check_output_dir(training_args)
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED),
        training_args.fp16,
    )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)
    # Set seed
    set_seed(training_args.seed)
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
    for p in extra_model_params:
        if getattr(training_args, p, None):
            assert hasattr(config, p), f"({config.__class__.__name__}) doesn't have a `{p}` attribute"
            setattr(config, p, getattr(training_args, p))
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForSeqaSeqLM.from_pretrained(
        model_args.model_name_or_path,
        from_tf=".ckpt" in model_args.model_name_or_path,
        config=config,
        cache_dir=model_args.cache_dir,
    )
    # use task specific params
    use_task_specific_params(model, data_args.task)
    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams
    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer, MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang)
    if model_args.freeze_embeds:
        freeze_embeds(model)
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder())
        assert_all_frozen(model.get_encoder())
    dataset_class = SeqaSeqDataset
    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer, type_path="train", data_dir=data_args.data_dir, n_obs=data_args.n_train, max_target_length=data_args.max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or "", )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer, type_path="val", data_dir=data_args.data_dir, n_obs=data_args.n_val, max_target_length=data_args.val_max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or "", )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer, type_path="test", data_dir=data_args.data_dir, n_obs=data_args.n_test, max_target_length=data_args.test_max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or "", )
        if training_args.do_predict
        else None
    )
    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task, tokenizer) if training_args.predict_with_generate else None
    )
    trainer = SeqaSeqTrainer(
        model=model,
        args=training_args,
        data_args=data_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        data_collator=SeqaSeqDataCollator(tokenizer, data_args, model.config.decoder_start_token_id, training_args.tpu_num_cores),
        compute_metrics=compute_metrics_fn,
        tokenizer=tokenizer,
    )
    all_metrics = {}
    # Training
    if training_args.do_train:
        logger.info("*** Train ***")
        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train
        trainer.save_model()  # this also saves the tokenizer
        if trainer.is_world_process_zero():
            handle_metrics("train", metrics, training_args.output_dir)
            all_metrics.update(metrics)
            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))
            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir)
    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(metric_key_prefix="val")
        metrics["val_n_objs"] = data_args.n_val
        metrics["val_loss"] = round(metrics["val_loss"], 4)
        if trainer.is_world_process_zero():
            handle_metrics("val", metrics, training_args.output_dir)
            all_metrics.update(metrics)
    if training_args.do_predict:
        logger.info("*** Predict ***")
        test_output = trainer.predict(test_dataset=test_dataset, metric_key_prefix="test")
        metrics = test_output.metrics
        metrics["test_n_objs"] = data_args.n_test
        if trainer.is_world_process_zero():
            metrics["test_loss"] = round(metrics["test_loss"], 4)
            handle_metrics("test", metrics, training_args.output_dir)
            all_metrics.update(metrics)
            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True
                )
                test_preds = lmap(str.strip, test_preds)
                write_txt_file(test_preds, os.path.join(training_args.output_dir, "test_generations.txt"))
    if trainer.is_world_process_zero():
        save_json(all_metrics, os.path.join(training_args.output_dir, "all_results.json"))
    return all_metrics


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
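For reference, a sketch of the JSON-args path that main() supports (the file name and its contents are hypothetical):

from transformers import HfArgumentParser

parser = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments))
model_args, data_args, training_args = parser.parse_json_file(json_file="train_args.json")
print(training_args.output_dir)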
| 71 | 0 |
from typing import Any


class Node:
    """A singly linked list node holding one payload."""

    def __init__(self, data: Any) -> None:
        self.data = data
        self.next = None


class LinkedList:
    """A minimal singly linked list supporting push, print and node swapping."""

    def __init__(self) -> None:
        self.head = None

    def print_list(self) -> None:
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()

    def push(self, new_data: Any) -> None:
        """Insert a new node at the head of the list."""
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    def swap_nodes(self, node_data_1: Any, node_data_2: Any) -> None:
        """Swap the payloads of the first nodes holding node_data_1 and node_data_2."""
        if node_data_1 == node_data_2:
            return
        node_1 = self.head
        while node_1 is not None and node_1.data != node_data_1:
            node_1 = node_1.next
        node_2 = self.head
        while node_2 is not None and node_2.data != node_data_2:
            node_2 = node_2.next
        if node_1 is None or node_2 is None:
            return
        node_1.data, node_2.data = node_2.data, node_1.data


if __name__ == "__main__":
    ll = LinkedList()
    for i in range(5, 0, -1):
        ll.push(i)
    ll.print_list()
    ll.swap_nodes(1, 4)
    print("After swapping")
    ll.print_list()
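A note on the design: swap_nodes exchanges the node payloads rather than relinking the nodes, which avoids the head-pointer and adjacent-node edge cases of a pointer swap; the trade-off is that external references to a Node keep their position but observe a new value, and with duplicate data only the first match of each key is swapped.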
| 651 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class TFBlenderbotModelTester:
__A : Dict =BlenderbotConfig
__A : Union[str, Any] ={}
__A : Any ="gelu"
def __init__( self ,_snake_case ,_snake_case=13 ,_snake_case=7 ,_snake_case=True ,_snake_case=False ,_snake_case=99 ,_snake_case=32 ,_snake_case=2 ,_snake_case=4 ,_snake_case=37 ,_snake_case=0.1 ,_snake_case=0.1 ,_snake_case=20 ,_snake_case=2 ,_snake_case=1 ,_snake_case=0 ,):
UpperCAmelCase_ : List[Any] = parent
UpperCAmelCase_ : str = batch_size
UpperCAmelCase_ : Dict = seq_length
UpperCAmelCase_ : int = is_training
UpperCAmelCase_ : Optional[Any] = use_labels
UpperCAmelCase_ : Any = vocab_size
UpperCAmelCase_ : Optional[int] = hidden_size
UpperCAmelCase_ : Optional[int] = num_hidden_layers
UpperCAmelCase_ : int = num_attention_heads
UpperCAmelCase_ : Tuple = intermediate_size
UpperCAmelCase_ : Any = hidden_dropout_prob
UpperCAmelCase_ : Optional[int] = attention_probs_dropout_prob
UpperCAmelCase_ : List[Any] = max_position_embeddings
UpperCAmelCase_ : str = eos_token_id
UpperCAmelCase_ : List[Any] = pad_token_id
UpperCAmelCase_ : List[Any] = bos_token_id
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Any = ids_tensor([self.batch_size, self.seq_length - 1] ,self.vocab_size )
UpperCAmelCase_ : int = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) ,1 )
UpperCAmelCase_ : Optional[Any] = tf.concat([input_ids, eos_tensor] ,axis=1 )
UpperCAmelCase_ : int = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
UpperCAmelCase_ : Optional[Any] = self.config_cls(
vocab_size=self.vocab_size ,d_model=self.hidden_size ,encoder_layers=self.num_hidden_layers ,decoder_layers=self.num_hidden_layers ,encoder_attention_heads=self.num_attention_heads ,decoder_attention_heads=self.num_attention_heads ,encoder_ffn_dim=self.intermediate_size ,decoder_ffn_dim=self.intermediate_size ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,eos_token_ids=[2] ,bos_token_id=self.bos_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.pad_token_id ,**self.config_updates ,)
UpperCAmelCase_ : List[str] = prepare_blenderbot_inputs_dict(_snake_case ,_snake_case ,_snake_case )
return config, inputs_dict
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ):
UpperCAmelCase_ : Tuple = TFBlenderbotModel(config=_snake_case ).get_decoder()
UpperCAmelCase_ : int = inputs_dict["input_ids"]
UpperCAmelCase_ : Dict = input_ids[:1, :]
UpperCAmelCase_ : Any = inputs_dict["attention_mask"][:1, :]
UpperCAmelCase_ : int = inputs_dict["head_mask"]
UpperCAmelCase_ : Optional[int] = 1
# first forward pass
UpperCAmelCase_ : List[str] = model(_snake_case ,attention_mask=_snake_case ,head_mask=_snake_case ,use_cache=_snake_case )
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
UpperCAmelCase_ : Optional[int] = ids_tensor((self.batch_size, 3) ,config.vocab_size )
UpperCAmelCase_ : Any = tf.cast(ids_tensor((self.batch_size, 3) ,2 ) ,tf.inta )
# append to next input_ids and
UpperCAmelCase_ : Union[str, Any] = tf.concat([input_ids, next_tokens] ,axis=-1 )
UpperCAmelCase_ : Any = tf.concat([attention_mask, next_attn_mask] ,axis=-1 )
UpperCAmelCase_ : Any = model(_snake_case ,attention_mask=_snake_case )[0]
UpperCAmelCase_ : List[Any] = model(_snake_case ,attention_mask=_snake_case ,past_key_values=_snake_case )[0]
self.parent.assertEqual(next_tokens.shape[1] ,output_from_past.shape[1] )
# select random slice
UpperCAmelCase_ : str = int(ids_tensor((1,) ,output_from_past.shape[-1] ) )
UpperCAmelCase_ : List[str] = output_from_no_past[:, -3:, random_slice_idx]
UpperCAmelCase_ : Union[str, Any] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(_snake_case ,_snake_case ,rtol=1E-3 )
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """Build the dict of model inputs, filling in default masks for any that are None."""
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class _snake_case (__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase):
__A : Union[str, Any] =(TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
__A : List[str] =(TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
__A : Dict =(
{
"conversational": TFBlenderbotForConditionalGeneration,
"feature-extraction": TFBlenderbotModel,
"summarization": TFBlenderbotForConditionalGeneration,
"text2text-generation": TFBlenderbotForConditionalGeneration,
"translation": TFBlenderbotForConditionalGeneration,
}
if is_tf_available()
else {}
)
__A : Any =True
__A : Dict =False
__A : Dict =False
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[int] = TFBlenderbotModelTester(self )
UpperCAmelCase_ : int = ConfigTester(self ,config_class=_snake_case )
def UpperCamelCase__ ( self ):
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_snake_case )
@require_tokenizers
@require_tf
class _snake_case (unittest.TestCase):
__A : Optional[int] =["My friends are cool but they eat too many carbs."]
__A : Optional[Any] ="facebook/blenderbot-400M-distill"
@cached_property
def UpperCamelCase__ ( self ):
return BlenderbotTokenizer.from_pretrained(self.model_name )
@cached_property
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[Any] = self.tokenizer(self.src_text ,return_tensors="tf" )
UpperCAmelCase_ : Union[str, Any] = self.model.generate(
model_inputs.input_ids ,)
UpperCAmelCase_ : str = self.tokenizer.batch_decode(generated_ids.numpy() ,skip_special_tokens=_snake_case )[0]
assert (
generated_words
== " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
)
| 71 | 0 |
from __future__ import annotations

from random import choice


def random_pivot(lst: list[int]) -> int:
    """Choose a random element of the list as pivot."""
    return choice(lst)


def kth_number(lst: list[int], k: int) -> int:
    """Return the k-th smallest element of lst (1-indexed) via quickselect."""
    pivot = random_pivot(lst)
    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]
    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 287 |
'''simple docstring'''
from numpy import exp, pi, sqrt
def gaussian(x: float, mu: float = 0.0, sigma: float = 1.0) -> float:
    """Return the value of the Gaussian (normal) probability density at x."""
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))
if __name__ == "__main__":
import doctest
doctest.testmod()
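A small sketch of expected values (the standard normal density peaks at 1/sqrt(2*pi) ≈ 0.3989):

from math import isclose

assert isclose(gaussian(0), 0.3989422804014327)
assert isclose(gaussian(2, mu=2, sigma=3), gaussian(0, sigma=3))  # symmetric about the mean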
| 71 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
"""simple docstring"""
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=13 , lowerCAmelCase_=7 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=99 , lowerCAmelCase_=32 , lowerCAmelCase_=5 , lowerCAmelCase_=4 , lowerCAmelCase_=37 , lowerCAmelCase_="gelu" , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.1 , lowerCAmelCase_=5_12 , lowerCAmelCase_=16 , lowerCAmelCase_=2 , lowerCAmelCase_=0.02 , lowerCAmelCase_=4 , ):
'''simple docstring'''
a_ : List[str] = parent
a_ : Optional[int] = batch_size
a_ : int = seq_length
a_ : Optional[int] = is_training
a_ : Any = use_attention_mask
a_ : str = use_token_type_ids
a_ : Optional[Any] = use_labels
a_ : Union[str, Any] = vocab_size
a_ : List[str] = hidden_size
a_ : int = num_hidden_layers
a_ : List[str] = num_attention_heads
a_ : str = intermediate_size
a_ : Tuple = hidden_act
a_ : Optional[int] = hidden_dropout_prob
a_ : int = attention_probs_dropout_prob
a_ : Any = max_position_embeddings
a_ : List[str] = type_vocab_size
a_ : Optional[int] = type_sequence_label_size
a_ : Union[str, Any] = initializer_range
a_ : Optional[int] = num_choices
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a_ : int = None
if self.use_attention_mask:
a_ : Dict = random_attention_mask([self.batch_size, self.seq_length] )
a_ : str = None
if self.use_token_type_ids:
a_ : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
a_ : str = RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_snake_case , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Any = self.prepare_config_and_inputs()
a_ : Union[str, Any] = config_and_inputs
a_ : Union[str, Any] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : List[Any] = self.prepare_config_and_inputs()
a_ : Dict = config_and_inputs
a_ : Any = True
a_ : Dict = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
a_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class _UpperCAmelCase ( __SCREAMING_SNAKE_CASE ,unittest.TestCase ):
"""simple docstring"""
a_ = True
a_ = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Optional[Any] = FlaxRobertaModelTester(self )
@slow
def _lowerCAmelCase ( self ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
a_ : List[str] = model_class_name.from_pretrained("""roberta-base""" , from_pt=_snake_case )
a_ : Any = model(np.ones((1, 1) ) )
self.assertIsNotNone(_snake_case )
| 577 |
'''simple docstring'''
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class _snake_case (nn.Module):
def __init__( self ,_snake_case = 16 ,_snake_case = 88 ,_snake_case = None ,_snake_case = 1 ,_snake_case = 0.0 ,_snake_case = 32 ,_snake_case = None ,_snake_case = False ,_snake_case = None ,_snake_case = None ,_snake_case = "geglu" ,_snake_case = None ,):
super().__init__()
UpperCAmelCase_ : Optional[Any] = nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=_snake_case ,attention_head_dim=_snake_case ,in_channels=_snake_case ,num_layers=_snake_case ,dropout=_snake_case ,norm_num_groups=_snake_case ,cross_attention_dim=_snake_case ,attention_bias=_snake_case ,sample_size=_snake_case ,num_vector_embeds=_snake_case ,activation_fn=_snake_case ,num_embeds_ada_norm=_snake_case ,)
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
UpperCAmelCase_ : List[str] = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
UpperCAmelCase_ : int = [77, 2_57]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
UpperCAmelCase_ : List[Any] = [1, 0]
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case=None ,_snake_case=None ,_snake_case=None ,_snake_case = True ,):
UpperCAmelCase_ : List[str] = hidden_states
UpperCAmelCase_ : str = []
UpperCAmelCase_ : Optional[int] = 0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
UpperCAmelCase_ : Any = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
UpperCAmelCase_ : Any = self.transformer_index_for_condition[i]
UpperCAmelCase_ : int = self.transformers[transformer_index](
_snake_case ,encoder_hidden_states=_snake_case ,timestep=_snake_case ,cross_attention_kwargs=_snake_case ,return_dict=_snake_case ,)[0]
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
UpperCAmelCase_ : Dict = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
UpperCAmelCase_ : List[Any] = output_states + input_states
if not return_dict:
return (output_states,)
return TransformeraDModelOutput(sample=_snake_case )
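The blending at the end of forward is a convex combination of the two transformers' residuals, added back onto the input. A standalone sketch of that arithmetic (shapes are illustrative):

import torch

input_states = torch.randn(1, 4, 8)
delta_0 = torch.randn(1, 4, 8)  # transformer 0 output minus its input
delta_1 = torch.randn(1, 4, 8)  # transformer 1 output minus its input
mix_ratio = 0.5

output = delta_0 * mix_ratio + delta_1 * (1 - mix_ratio)
output = output + input_states  # residual connection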
| 71 | 0 |
import re
def dna(dna: str) -> str:
    """Return the complementary DNA strand (A<->T, C<->G)."""
    if len(re.findall("""[ATCG]""", dna)) != len(dna):
        raise ValueError("""Invalid Strand""")
    return dna.translate(dna.maketrans("""ATCG""", """TAGC"""))
if __name__ == "__main__":
import doctest
doctest.testmod()
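Quick usage sketch of the complement function:

assert dna("ATCG") == "TAGC"
assert dna("GTCATGTA") == "CAGTACAT"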
| 377 |
'''simple docstring'''
import json
import sys
def format_json_to_md(input_json_file: str, output_md_file: str) -> None:
    """Render a benchmark-results JSON file as a collapsible Markdown report."""
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)
    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]
    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]
        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")
        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)
            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"
            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"
        output_md += [title, lines, value, " "]
    output_md.append("</details>")
    with open(output_md_file, "w", encoding="utf-8") as f:
        f.writelines("\n".join(output_md))


if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]
    format_json_to_md(input_json_file, output_md_file)
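The function expects benchmark names mapping to metric dicts with a 'new' entry and optional 'old'/'diff' entries; a sketch of the input shape (all values hypothetical):

example = {
    "benchmarks/text_generation": {
        "tokens_per_sec": {"new": 1532.4, "old": 1490.1, "diff": 42.3},
        "peak_memory_mb": {"new": 2048.0},
    }
}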
| 71 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
__lowercase : List[Any] = StableDiffusionInpaintPipeline
__lowercase : Tuple = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
__lowercase : int = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
__lowercase : Tuple = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__lowercase : Optional[Any] = frozenset([] )
def snake_case_ ( self):
torch.manual_seed(0)
__SCREAMING_SNAKE_CASE = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=9 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=3_2 , attention_head_dim=(2, 4) , use_linear_projection=_snake_case , )
__SCREAMING_SNAKE_CASE = PNDMScheduler(skip_prk_steps=_snake_case)
torch.manual_seed(0)
__SCREAMING_SNAKE_CASE = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0)
__SCREAMING_SNAKE_CASE = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act="""gelu""" , projection_dim=5_1_2 , )
__SCREAMING_SNAKE_CASE = CLIPTextModel(_snake_case)
__SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""")
__SCREAMING_SNAKE_CASE = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__=0):
# TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
__SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(_snake_case)).to(_snake_case)
__SCREAMING_SNAKE_CASE = image.cpu().permute(0 , 2 , 3 , 1)[0]
__SCREAMING_SNAKE_CASE = Image.fromarray(np.uinta(_snake_case)).convert("""RGB""").resize((6_4, 6_4))
__SCREAMING_SNAKE_CASE = Image.fromarray(np.uinta(image + 4)).convert("""RGB""").resize((6_4, 6_4))
if str(_snake_case).startswith("""mps"""):
__SCREAMING_SNAKE_CASE = torch.manual_seed(_snake_case)
else:
__SCREAMING_SNAKE_CASE = torch.Generator(device=_snake_case).manual_seed(_snake_case)
__SCREAMING_SNAKE_CASE = {
"prompt": "A painting of a squirrel eating a burger",
"image": init_image,
"mask_image": mask_image,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = "cpu" # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE = self.get_dummy_components()
__SCREAMING_SNAKE_CASE = StableDiffusionInpaintPipeline(**_snake_case)
__SCREAMING_SNAKE_CASE = sd_pipe.to(_snake_case)
sd_pipe.set_progress_bar_config(disable=_snake_case)
__SCREAMING_SNAKE_CASE = self.get_dummy_inputs(_snake_case)
__SCREAMING_SNAKE_CASE = sd_pipe(**_snake_case).images
__SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
__SCREAMING_SNAKE_CASE = np.array([0.47_27, 0.57_35, 0.39_41, 0.54_46, 0.59_26, 0.43_94, 0.50_62, 0.46_54, 0.44_76])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def snake_case_ ( self):
super().test_inference_batch_single_identical(expected_max_diff=3E-3)
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def snake_case_ ( self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""")
__SCREAMING_SNAKE_CASE = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""")
__SCREAMING_SNAKE_CASE = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
"""/yellow_cat_sitting_on_a_park_bench.npy""")
__SCREAMING_SNAKE_CASE = "stabilityai/stable-diffusion-2-inpainting"
__SCREAMING_SNAKE_CASE = StableDiffusionInpaintPipeline.from_pretrained(_snake_case , safety_checker=_snake_case)
pipe.to(_snake_case)
pipe.set_progress_bar_config(disable=_snake_case)
pipe.enable_attention_slicing()
__SCREAMING_SNAKE_CASE = "Face of a yellow cat, high resolution, sitting on a park bench"
__SCREAMING_SNAKE_CASE = torch.manual_seed(0)
__SCREAMING_SNAKE_CASE = pipe(
prompt=_snake_case , image=_snake_case , mask_image=_snake_case , generator=_snake_case , output_type="""np""" , )
__SCREAMING_SNAKE_CASE = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image).max() < 9E-3
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""")
__SCREAMING_SNAKE_CASE = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""")
__SCREAMING_SNAKE_CASE = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
"""/yellow_cat_sitting_on_a_park_bench_fp16.npy""")
__SCREAMING_SNAKE_CASE = "stabilityai/stable-diffusion-2-inpainting"
__SCREAMING_SNAKE_CASE = StableDiffusionInpaintPipeline.from_pretrained(
_snake_case , torch_dtype=torch.floataa , safety_checker=_snake_case , )
pipe.to(_snake_case)
pipe.set_progress_bar_config(disable=_snake_case)
pipe.enable_attention_slicing()
__SCREAMING_SNAKE_CASE = "Face of a yellow cat, high resolution, sitting on a park bench"
__SCREAMING_SNAKE_CASE = torch.manual_seed(0)
__SCREAMING_SNAKE_CASE = pipe(
prompt=_snake_case , image=_snake_case , mask_image=_snake_case , generator=_snake_case , output_type="""np""" , )
__SCREAMING_SNAKE_CASE = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image).max() < 5E-1
def snake_case_ ( self):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__SCREAMING_SNAKE_CASE = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""")
__SCREAMING_SNAKE_CASE = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""")
__SCREAMING_SNAKE_CASE = "stabilityai/stable-diffusion-2-inpainting"
__SCREAMING_SNAKE_CASE = PNDMScheduler.from_pretrained(_snake_case , subfolder="""scheduler""")
__SCREAMING_SNAKE_CASE = StableDiffusionInpaintPipeline.from_pretrained(
_snake_case , safety_checker=_snake_case , scheduler=_snake_case , torch_dtype=torch.floataa , )
pipe.to(_snake_case)
pipe.set_progress_bar_config(disable=_snake_case)
pipe.enable_attention_slicing(1)
pipe.enable_sequential_cpu_offload()
__SCREAMING_SNAKE_CASE = "Face of a yellow cat, high resolution, sitting on a park bench"
__SCREAMING_SNAKE_CASE = torch.manual_seed(0)
__SCREAMING_SNAKE_CASE = pipe(
prompt=_snake_case , image=_snake_case , mask_image=_snake_case , generator=_snake_case , num_inference_steps=2 , output_type="""np""" , )
__SCREAMING_SNAKE_CASE = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 1_0**9
| 155 |
'''simple docstring'''
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCamelCase = get_tests_dir("""fixtures/spiece.model""")
@require_sentencepiece
@require_tokenizers
class _snake_case (__SCREAMING_SNAKE_CASE , unittest.TestCase):
__A : Optional[int] =DebertaVaTokenizer
__A : Union[str, Any] =DebertaVaTokenizerFast
__A : str =True
__A : List[str] =True
def UpperCamelCase__ ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase_ : Optional[int] = DebertaVaTokenizer(_snake_case ,unk_token="<unk>" )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase__ ( self ,_snake_case ):
UpperCAmelCase_ : List[Any] = "this is a test"
UpperCAmelCase_ : Optional[Any] = "this is a test"
return input_text, output_text
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[Any] = "<pad>"
UpperCAmelCase_ : str = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_snake_case ) ,_snake_case )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_snake_case ) ,_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : int = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,"<pad>" )
self.assertEqual(vocab_keys[1] ,"<unk>" )
self.assertEqual(vocab_keys[-1] ,"[PAD]" )
self.assertEqual(len(_snake_case ) ,3_00_01 )
def UpperCamelCase__ ( self ):
self.assertEqual(self.get_tokenizer().vocab_size ,3_00_00 )
def UpperCamelCase__ ( self ):
# fmt: off
UpperCAmelCase_ : str = " \tHeLLo!how \n Are yoU? "
UpperCAmelCase_ : Union[str, Any] = ["▁hello", "!", "how", "▁are", "▁you", "?"]
# fmt: on
UpperCAmelCase_ : Tuple = DebertaVaTokenizer(_snake_case ,do_lower_case=_snake_case )
UpperCAmelCase_ : Union[str, Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Tuple = DebertaVaTokenizerFast(_snake_case ,do_lower_case=_snake_case )
UpperCAmelCase_ : Any = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
@unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." )
def UpperCamelCase__ ( self ):
pass
@unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." )
def UpperCamelCase__ ( self ):
pass
def UpperCamelCase__ ( self ):
# fmt: off
UpperCAmelCase_ : Optional[int] = "I was born in 92000, and this is falsé."
UpperCAmelCase_ : List[str] = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
UpperCAmelCase_ : List[Any] = DebertaVaTokenizer(_snake_case ,split_by_punct=_snake_case )
UpperCAmelCase_ : Optional[int] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : int = DebertaVaTokenizerFast(_snake_case ,split_by_punct=_snake_case )
UpperCAmelCase_ : Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ):
# fmt: off
UpperCAmelCase_ : Tuple = "I was born in 92000, and this is falsé."
UpperCAmelCase_ : Dict = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
UpperCAmelCase_ : Optional[Any] = DebertaVaTokenizer(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case )
UpperCAmelCase_ : Any = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : List[Any] = DebertaVaTokenizerFast(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case )
UpperCAmelCase_ : Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ):
# fmt: off
UpperCAmelCase_ : Optional[int] = "I was born in 92000, and this is falsé."
UpperCAmelCase_ : Optional[int] = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
# fmt: on
UpperCAmelCase_ : List[Any] = DebertaVaTokenizer(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case )
UpperCAmelCase_ : str = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Optional[Any] = DebertaVaTokenizerFast(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case )
UpperCAmelCase_ : str = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ):
# fmt: off
UpperCAmelCase_ : Optional[int] = "I was born in 92000, and this is falsé."
UpperCAmelCase_ : Optional[Any] = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
UpperCAmelCase_ : List[str] = DebertaVaTokenizer(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case )
UpperCAmelCase_ : Tuple = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Dict = DebertaVaTokenizerFast(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case )
UpperCAmelCase_ : Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ):
# fmt: off
UpperCAmelCase_ : Tuple = " \tHeLLo!how \n Are yoU? "
UpperCAmelCase_ : List[Any] = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"]
# fmt: on
UpperCAmelCase_ : Any = DebertaVaTokenizer(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case )
UpperCAmelCase_ : List[str] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : int = DebertaVaTokenizerFast(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case )
UpperCAmelCase_ : Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[str] = self.get_tokenizer()
UpperCAmelCase_ : Union[str, Any] = self.get_rust_tokenizer()
UpperCAmelCase_ : Dict = "I was born in 92000, and this is falsé."
UpperCAmelCase_ : Any = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
UpperCAmelCase_ : Optional[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Tuple = tokenizer.encode(_snake_case ,add_special_tokens=_snake_case )
UpperCAmelCase_ : int = rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Optional[Any] = self.get_rust_tokenizer()
UpperCAmelCase_ : Union[str, Any] = tokenizer.encode(_snake_case )
UpperCAmelCase_ : List[Any] = rust_tokenizer.encode(_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
    def test_full_tokenizer(self):
        sequence = "This is a test"
        ids_target = [13, 1, 4398, 25, 21, 1289]
        tokens_target = ["▁", "T", "his", "▁is", "▁a", "▁test"]
        back_tokens_target = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]
        tokenizer = DebertaV2Tokenizer(_snake_case, keep_accents=True)
        rust_tokenizer = DebertaV2TokenizerFast(_snake_case, keep_accents=True)
        ids = tokenizer.encode(sequence, add_special_tokens=_snake_case)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=_snake_case)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        ids_target = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
        tokens_target = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
        back_tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on
        ids = tokenizer.encode(sequence, add_special_tokens=_snake_case)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=_snake_case)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)
    def test_sequence_builders(self):
        tokenizer = DebertaV2Tokenizer(_snake_case)
        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id], encoded_sentence)
        self.assertEqual(
            [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [tokenizer.sep_token_id], encoded_pair, )
    @slow
    def test_tokenizer_integration(self):
# fmt: off
UpperCAmelCase_ : Union[str, Any] = {"input_ids": [[1, 3_98_67, 36, 1_93_90, 4_86, 27, 3_50_52, 8_14_36, 18, 6_06_85, 12_25, 7, 3_50_52, 8_14_36, 18, 93_67, 1_68_99, 18, 1_59_37, 53, 5_94, 7_73, 18, 1_62_87, 3_04_65, 36, 1_59_37, 6, 4_11_39, 38, 3_69_79, 6_07_63, 1_91, 6, 3_41_32, 99, 6, 5_05_38, 3_90, 4_32_30, 6, 3_41_32, 27_79, 2_08_50, 14, 6_99, 10_72, 11_94, 36, 3_82, 1_09_01, 53, 7, 6_99, 10_72, 20_84, 36, 2_04_22, 6_30, 53, 19, 1_05, 30_49, 18_96, 10_53, 1_68_99, 15_06, 11, 3_79_78, 42_43, 7, 12_37, 3_18_69, 2_00, 1_65_66, 6_54, 6, 3_50_52, 8_14_36, 7, 5_56_30, 1_35_93, 4, 2], [1, 26, 1_50_11, 13, 6_67, 8, 10_53, 18, 2_36_11, 12_37, 7_23_56, 1_28_20, 34, 10_41_34, 12_09, 35, 1_33_13, 66_27, 21, 2_02, 3_47, 7, 1_64, 23_99, 11, 46, 44_85, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 12_32, 28_64, 1_57_85, 1_49_51, 1_05, 5, 85_81, 12_50, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_snake_case ,model_name="microsoft/deberta-v2-xlarge" ,revision="ad6e42c1532ddf3a15c39246b63f5559d558b670" ,)
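As a hedged, self-contained illustration of the slow/fast parity these assertions enforce (network access is assumed; the checkpoint name is the one used above):
# Minimal sketch of the slow/fast tokenizer parity check.
from transformers import DebertaV2Tokenizer, DebertaV2TokenizerFast

def check_parity(text: str, checkpoint: str = "microsoft/deberta-v2-xlarge") -> bool:
    slow = DebertaV2Tokenizer.from_pretrained(checkpoint)
    fast = DebertaV2TokenizerFast.from_pretrained(checkpoint)
    # The two implementations should produce identical token ids.
    return slow.encode(text) == fast.encode(text)

print(check_parity("I was born in 92000, and this is falsé."))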
| 71 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class AltDiffusionPipelineFastTests(PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = AltDiffusionPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        # TODO: address the non-deterministic text encoder (fails for save-load tests)
        # torch.manual_seed(0)
        # text_encoder_config = RobertaSeriesConfig(
        #     hidden_size=32,
        #     project_dim=32,
        #     intermediate_size=37,
        #     layer_norm_eps=1e-05,
        #     num_attention_heads=4,
        #     num_hidden_layers=5,
        #     vocab_size=5002,
        # )
        # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=5002, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
    def test_alt_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32, project_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, vocab_size=5002, )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder
        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = "A photo of an astronaut"
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_alt_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32, project_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, vocab_size=5002, )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder
        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_alt_diffusion(self):
        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=20, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_alt_diffusion_fast_ddim(self):
        scheduler = DDIMScheduler.from_pretrained("BAAI/AltDiffusion", subfolder="scheduler")
        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", scheduler=scheduler, safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, num_inference_steps=2, output_type="numpy")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 | 329
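A minimal standalone sketch of the device-aware seeding pattern that get_dummy_inputs above relies on: MPS needs the global torch.manual_seed, while CPU/CUDA accept a per-device torch.Generator.
import torch

def make_generator(device: str, seed: int = 0) -> torch.Generator:
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)  # global seeding fallback for MPS
    return torch.Generator(device=device).manual_seed(seed)

gen = make_generator("cpu", seed=0)
print(torch.randn(2, generator=gen))  # reproducible across runs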
'''simple docstring'''
def get_highest_set_bit_position(number: int) -> int:
    """Return the 1-indexed position of the highest set bit of `number`."""
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    position = 0
    while number:
        position += 1
        number >>= 1
    return position
if __name__ == "__main__":
import doctest
doctest.testmod()
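A quick sanity check of the function above: for non-negative integers it agrees with the built-in int.bit_length(), which is the idiomatic way to get the same value.
for n in (0, 1, 25, 36, 4096):
    assert get_highest_set_bit_position(n) == n.bit_length()
print(get_highest_set_bit_position(25))  # 25 = 0b11001 -> 5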
| 71 | 0 |
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class TFBlenderbotModelTester:
    config_cls = BlenderbotConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]
        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])
        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
def prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ], axis=-1, )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFBlenderbotModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotForConditionalGeneration,
            "feature-extraction": TFBlenderbotModel,
            "summarization": TFBlenderbotForConditionalGeneration,
            "text2text-generation": TFBlenderbotForConditionalGeneration,
            "translation": TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFBlenderbotModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbot400MIntegrationTests(unittest.TestCase):
    src_text = ["My friends are cool but they eat too many carbs."]
    model_name = "facebook/blenderbot-400M-distill"

    @cached_property
    def tokenizer(self):
        return BlenderbotTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
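A minimal standalone sketch of the mask-building logic inside prepare_blenderbot_inputs_dict above: positions equal to the pad id get mask 0, everything else mask 1.
import tensorflow as tf

pad_token_id = 1
input_ids = tf.constant([[5, 7, 9, 1, 1]])
attention_mask = tf.cast(tf.math.not_equal(input_ids, pad_token_id), tf.int8)
print(attention_mask.numpy())  # [[1 1 1 0 0]]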
| 637 |
'''simple docstring'''
from math import factorial
def combinations(n: int, k: int) -> int:
    """Compute n choose k, the number of k-element subsets of an n-element set."""
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))
if __name__ == "__main__":
print(
"""The number of five-card hands possible from a standard""",
f"""fifty-two card deck is: {combinations(52, 5)}\n""",
)
print(
"""If a class of 40 students must be arranged into groups of""",
f"""4 for group projects, there are {combinations(40, 4)} ways""",
"""to arrange them.\n""",
)
print(
"""If 10 teams are competing in a Formula One race, there""",
f"""are {combinations(10, 3)} ways that first, second and""",
"""third place can be awarded.""",
)
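A quick sanity check for the factorial formula above: it matches math.comb, the standard-library implementation of n choose k (Python 3.8+).
from math import comb

for n, k in ((52, 5), (40, 4), (10, 3)):
    assert combinations(n, k) == comb(n, k)
print(combinations(52, 5))  # 2598960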
| 71 | 0 |
'''simple docstring'''
class RadixNode:
    def __init__(self, prefix: str = "", is_leaf: bool = False) -> None:
        # Mapping from the first character of the prefix of the node
        self.nodes: dict[str, RadixNode] = {}
        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf
        self.prefix = prefix

    def match(self, word: str) -> tuple[str, str, str]:
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]

    def insert_many(self, words: list[str]) -> None:
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        # Case 1: If the word is the prefix of the node
        # Solution: We set the current node as leaf
        if self.prefix == word:
            self.is_leaf = True
        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True)
        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word)
            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)
            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix
                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node
                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)

    def find(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word)

    def delete(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word)
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values())[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values())[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes
                    return True

    def print_tree(self, height: int = 0) -> None:
        if self.prefix != "":
            print("-" * height, self.prefix, "  (leaf)" if self.is_leaf else "")
        for value in self.nodes.values():
            value.print_tree(height + 1)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)
    assert all(root.find(word) for word in words)
    assert not root.find("bandanas")
    assert not root.find("apps")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True


def pytests() -> None:
    assert test_trie()


def main() -> None:
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words)
    print("Words:", words)
    print("Tree:")
    root.print_tree()
if __name__ == "__main__":
main()
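A worked example of RadixNode.match, the helper that drives insert, find, and delete above: it splits a word against a node prefix into the common part, the leftover prefix, and the leftover word.
node = RadixNode(prefix="band")
print(node.match("bananas"))   # ('ban', 'd', 'anas')  -> prefixes diverge after "ban"
print(node.match("bandanas"))  # ('band', '', 'anas')  -> prefix fully consumed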
| 422 |
'''simple docstring'''
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
    UNet3DConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class VideoToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = VideoToVideoSDPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"}) - {"image", "width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"}) - {"image"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    test_attention_slicing = False
    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ])
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"), up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"), cross_attention_dim=32, attention_head_dim=4, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        # 3 frames
        video = floats_tensor((1, 3, 3, 32, 32), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "video": video,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs
    def test_video_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = VideoToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]
        assert frames[0].shape == (32, 32, 3)
        expected_slice = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=5e-3)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()
@slow
@skip_mps
class VideoToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_two_step_model(self):
        pipe = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_XL", torch_dtype=torch.float16)
        pipe.enable_model_cpu_offload()
        # 10 frames
        generator = torch.Generator(device="cpu").manual_seed(0)
        video = torch.randn((1, 10, 3, 1024, 576), generator=generator)
        video = video.to("cuda")
        prompt = "Spiderman is surfing"
        video_frames = pipe(prompt, video=video, generator=generator, num_inference_steps=3, output_type="pt").frames
        expected_array = np.array([-1.0458984, -1.1279297, -0.9663086, -0.91503906, -0.75097656])
        assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array).sum() < 1e-2
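The dummy and slow tests above lay video tensors out as (batch, frames, channels, height, width); a quick sketch of slicing one RGB frame out of such a tensor.
import torch

video = torch.randn(1, 3, 3, 32, 32)  # 1 clip, 3 frames, 3 channels, 32x32
first_frame = video[0, 0]             # -> (3, 32, 32), channels first
print(first_frame.shape)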
| 71 | 0 |
'''simple docstring'''
def greatest_common_divisor(a: int, b: int) -> int:
    """Calculate the greatest common divisor recursively (Euclid's algorithm)."""
    return abs(b) if a == 0 else greatest_common_divisor(b % a, a)


def gcd_by_iterative(x: int, y: int) -> int:
    """Calculate the greatest common divisor iteratively."""
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        x, y = y, x % y
    return abs(x)


def main() -> None:
    """Call the GCD functions on two user-supplied integers."""
    try:
        nums = input("Enter two integers separated by comma (,): ").split(",")
        num_1 = int(nums[0])
        num_2 = int(nums[1])
        print(
            f"greatest_common_divisor({num_1}, {num_2}) = "
            f"{greatest_common_divisor(num_1, num_2)}"
        )
        print(f"By iterative gcd({num_1}, {num_2}) = {gcd_by_iterative(num_1, num_2)}")
    except (IndexError, UnboundLocalError, ValueError):
        print("Wrong input")
if __name__ == "__main__":
main()
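Quick property checks for the two implementations above: they agree with each other and satisfy gcd(a, b) * lcm(a, b) == |a * b|.
for a, b in ((24, 40), (0, 7), (13, 91)):
    g = greatest_common_divisor(a, b)
    assert g == gcd_by_iterative(a, b)
    if g:
        assert g * (a * b // g) == a * b  # lcm identity
print(greatest_common_divisor(24, 40))  # 8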
| 330 |
'''simple docstring'''
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize("path" , ["paws", "csv"] )
def a__ ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : str ) -> Tuple:
"""simple docstring"""
inspect_dataset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[Any] = path + ".py"
assert script_name in os.listdir(_SCREAMING_SNAKE_CASE )
assert "__pycache__" not in os.listdir(_SCREAMING_SNAKE_CASE )
@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning" )
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning" )
@pytest.mark.parametrize("path" , ["accuracy"] )
def a__ ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Tuple ) -> List[str]:
"""simple docstring"""
inspect_metric(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = path + ".py"
assert script_name in os.listdir(_SCREAMING_SNAKE_CASE )
assert "__pycache__" not in os.listdir(_SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
    "path, config_name, expected_splits", [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ], )
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception", [
        ("paws", None, ValueError),
    ], )
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)
@pytest.mark.parametrize(
    "path, expected", [
        ("squad", "plain_text"),
        ("acronym_identification", "default"),
        ("lhoestq/squad", "plain_text"),
        ("lhoestq/test", "default"),
        ("lhoestq/demo1", "lhoestq--demo1"),
        ("dalle-mini/wit", "dalle-mini--wit"),
    ], )
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names


@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config", [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ], )
def test_get_dataset_infos_no_config_name(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config


@pytest.mark.parametrize(
    "path, expected_config, expected_splits", [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ], )
def test_get_dataset_info(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception", [
        ("paws", None, ValueError),
    ], )
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
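A minimal usage sketch for the inspected APIs above; network access to the Hugging Face Hub is assumed, and "squad" is one of the datasets in the parametrized cases.
from datasets import get_dataset_config_names, get_dataset_split_names

configs = get_dataset_config_names("squad")
print(configs)  # e.g. ['plain_text']
print(get_dataset_split_names("squad", config_name=configs[0]))  # e.g. ['train', 'validation']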
| 71 | 0 |
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
_lowercase = """ \"\"\"
Output class for the scheduler's step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"\"\"
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
"""
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, "schedulers/"))
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/diffusers/schedulers/scheduling_ddpm.py"), os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"), )

    def tearDown(self):
        check_copies.DIFFUSERS_PATH = "src/diffusers"
        shutil.rmtree(self.diffusers_dir)
    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.diffusers_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)
    def test_find_code_in_diffusers(self):
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput")
        self.assertEqual(code, REFERENCE_CODE)

    def test_copy_consistency(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput", "DDPMSchedulerOutput", REFERENCE_CODE + "\n", )
        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput", "DDPMSchedulerOutput", REFERENCE_CODE, )
        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test", "TestSchedulerOutput", re.sub("DDPM", "Test", REFERENCE_CODE), )
        # Copy consistency with a really long name
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}", f"{long_class_name}SchedulerOutput", re.sub("Bert", long_class_name, REFERENCE_CODE), )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test", "TestSchedulerOutput", REFERENCE_CODE, overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE), )
| 5 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class CLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        # Create a list of random numpy images and convert them to PIL images.
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()
        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)
        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)
        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 71 | 0 |
import re
def indian_phone_validator(phone: str) -> bool:
    """Validate an Indian mobile number with optional +91/91/0 prefixes."""
    pat = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$")
    if match := re.search(pat, phone):
        return match.string == phone
    return False
if __name__ == "__main__":
print(indian_phone_validator('+918827897895'))
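Example outcomes for the validator above; the ^...$ anchors force the pattern to cover the whole input, so partial matches are rejected.
for number in ("+918827897895", "9876543210", "+91 8827897895", "12345"):
    print(number, indian_phone_validator(number))  # first three True, last False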
| 303 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
    SpeechT5HifiGan,
    SpeechT5HifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class AudioLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = AudioLDMPipeline
    params = TEXT_TO_AUDIO_PARAMS
    batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "num_waveforms_per_prompt",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ])
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=(32, 64), class_embed_type="simple_projection", projection_class_embeddings_input_dim=32, class_embeddings_concat=True, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=1, out_channels=1, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = ClapTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, projection_dim=32, )
        text_encoder = ClapTextModelWithProjection(text_encoder_config)
        tokenizer = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta", model_max_length=77)
        vocoder_config = SpeechT5HifiGanConfig(
            model_in_dim=8, sampling_rate=16000, upsample_initial_channel=16, upsample_rates=[2, 2], upsample_kernel_sizes=[4, 4], resblock_kernel_sizes=[3, 7], resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]], normalize_before=False, )
        vocoder = SpeechT5HifiGan(vocoder_config)
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "vocoder": vocoder,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
        }
        return inputs
    def test_audioldm_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(**inputs)
        audio = output.audios[0]
        assert audio.ndim == 1
        assert len(audio) == 256
        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033])
        assert np.abs(audio_slice - expected_slice).max() < 1e-2
    def test_audioldm_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]
        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]
        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]
        text_inputs = audioldm_pipe.tokenizer(
            prompt, padding="max_length", max_length=audioldm_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt", )
        text_inputs = text_inputs["input_ids"].to(torch_device)
        prompt_embeds = audioldm_pipe.text_encoder(
            text_inputs, )
        prompt_embeds = prompt_embeds.text_embeds
        # additional L_2 normalization over each hidden-state
        prompt_embeds = F.normalize(prompt_embeds, dim=-1)
        inputs["prompt_embeds"] = prompt_embeds
        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]
        assert np.abs(audio_1 - audio_2).max() < 1e-2
    def test_audioldm_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]
        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]
        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]
        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = audioldm_pipe.tokenizer(
                p, padding="max_length", max_length=audioldm_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt", )
            text_inputs = text_inputs["input_ids"].to(torch_device)
            text_embeds = audioldm_pipe.text_encoder(
                text_inputs, )
            text_embeds = text_embeds.text_embeds
            # additional L_2 normalization over each hidden-state
            text_embeds = F.normalize(text_embeds, dim=-1)
            embeds.append(text_embeds)
        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds
        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]
        assert np.abs(audio_1 - audio_2).max() < 1e-2
    def test_audioldm_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        negative_prompt = "egg cracking"
        output = audioldm_pipe(**inputs, negative_prompt=negative_prompt)
        audio = output.audios[0]
        assert audio.ndim == 1
        assert len(audio) == 256
        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032])
        assert np.abs(audio_slice - expected_slice).max() < 1e-2
def UpperCamelCase__ ( self ):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
components["scheduler"] = PNDMScheduler(skip_prk_steps=True )
audioldm_pipe = AudioLDMPipeline(**components )
audioldm_pipe = audioldm_pipe.to(device )
audioldm_pipe.set_progress_bar_config(disable=None )
prompt = "A hammer hitting a wooden surface"
# test num_waveforms_per_prompt=1 (default)
audios = audioldm_pipe(prompt ,num_inference_steps=2 ).audios
assert audios.shape == (1, 2_56)
# test num_waveforms_per_prompt=1 (default) for a batch of prompts
batch_size = 2
audios = audioldm_pipe([prompt] * batch_size ,num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 2_56)
# test num_waveforms_per_prompt for a single prompt
num_waveforms_per_prompt = 2
audios = audioldm_pipe(prompt ,num_inference_steps=2 ,num_waveforms_per_prompt=num_waveforms_per_prompt ).audios
assert audios.shape == (num_waveforms_per_prompt, 2_56)
# test num_waveforms_per_prompt for a batch of prompts
batch_size = 2
audios = audioldm_pipe(
[prompt] * batch_size ,num_inference_steps=2 ,num_waveforms_per_prompt=num_waveforms_per_prompt ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 2_56)
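# Hedged note (illustration only): the audio batch dimension is always
# len(prompts) * num_waveforms_per_prompt, so the shapes asserted above follow from
# simple arithmetic; 256 samples matches the dummy vocoder used by these fast tests.
def expected_audio_shape(num_prompts, num_waveforms_per_prompt, num_samples=256):
    # hypothetical helper, not part of the diffusers API
    return (num_prompts * num_waveforms_per_prompt, num_samples)
assert expected_audio_shape(2, 2) == (4, 256)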
def UpperCamelCase__ ( self ):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
audioldm_pipe = AudioLDMPipeline(**components )
audioldm_pipe = audioldm_pipe.to(device )
audioldm_pipe.set_progress_bar_config(disable=None )
vocoder_sampling_rate = audioldm_pipe.vocoder.config.sampling_rate
inputs = self.get_dummy_inputs(device )
output = audioldm_pipe(audio_length_in_s=0.016 ,**inputs )
audio = output.audios[0]
assert audio.ndim == 1
assert len(audio ) / vocoder_sampling_rate == 0.016
output = audioldm_pipe(audio_length_in_s=0.032 ,**inputs )
audio = output.audios[0]
assert audio.ndim == 1
assert len(audio ) / vocoder_sampling_rate == 0.032
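# Hedged sketch: the relation asserted above is simply
# num_samples = audio_length_in_s * vocoder_sampling_rate. Assuming a 16 kHz dummy
# vocoder, 0.016 s -> 256 samples and 0.032 s -> 512 samples.
def expected_num_samples(audio_length_in_s, sampling_rate=16_000):
    return int(audio_length_in_s * sampling_rate)
assert expected_num_samples(0.016) == 256
assert expected_num_samples(0.032) == 512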
def UpperCamelCase__ ( self ):
components = self.get_dummy_components()
audioldm_pipe = AudioLDMPipeline(**components )
audioldm_pipe = audioldm_pipe.to(torch_device )
audioldm_pipe.set_progress_bar_config(disable=None )
prompt = ["hey"]
output = audioldm_pipe(prompt ,num_inference_steps=1 )
audio_shape = output.audios.shape
assert audio_shape == (1, 2_56)
config = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
audioldm_pipe.vocoder = SpeechTaHifiGan(config ).to(torch_device )
output = audioldm_pipe(prompt ,num_inference_steps=1 )
audio_shape = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 2_56)
def UpperCamelCase__ ( self ):
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False )
def UpperCamelCase__ ( self ):
self._test_inference_batch_single_identical(test_mean_pixel_difference=False )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() ,reason="XFormers attention is only available with CUDA and `xformers` installed" ,)
def UpperCamelCase__ ( self ):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False )
@slow
class AudioLDMPipelineSlowTests (unittest.TestCase):
def tearDown( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def get_inputs( self ,device ,generator_device="cpu" ,dtype=torch.float32 ,seed=0 ):
generator = torch.Generator(device=generator_device ).manual_seed(seed )
latents = np.random.RandomState(seed ).standard_normal((1, 8, 1_28, 16) )
latents = torch.from_numpy(latents ).to(device=device ,dtype=dtype )
inputs = {
"prompt": "A hammer hitting a wooden surface",
"latents": latents,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 2.5,
}
return inputs
def UpperCamelCase__ ( self ):
audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm" )
audioldm_pipe = audioldm_pipe.to(torch_device )
audioldm_pipe.set_progress_bar_config(disable=None )
inputs = self.get_inputs(torch_device )
inputs["num_inference_steps"] = 25
audio = audioldm_pipe(**inputs ).audios[0]
assert audio.ndim == 1
assert len(audio ) == 8_19_20
audio_slice = audio[7_72_30:7_72_40]
expected_slice = np.array(
[-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315] )
max_diff = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1E-2
def UpperCamelCase__ ( self ):
audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm" )
audioldm_pipe.scheduler = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
audioldm_pipe = audioldm_pipe.to(torch_device )
audioldm_pipe.set_progress_bar_config(disable=None )
inputs = self.get_inputs(torch_device )
audio = audioldm_pipe(**inputs ).audios[0]
assert audio.ndim == 1
assert len(audio ) == 8_19_20
audio_slice = audio[2_77_80:2_77_90]
expected_slice = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212] )
max_diff = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3E-2
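# Hedged end-to-end usage sketch mirroring the slow tests above (the checkpoint id comes
# from the tests; device and generation settings are assumptions, and real weights plus a
# GPU are required, so this stays commented out):
# from diffusers import AudioLDMPipeline
# pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm").to("cuda")
# audio = pipe("A hammer hitting a wooden surface", num_inference_steps=25).audios[0]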
| 71 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class TimmBackboneConfig ( PretrainedConfig ):
"""simple docstring"""
model_type = "timm_backbone"
def __init__( self , backbone=None , num_channels=3 , features_only=True , use_pretrained_backbone=True , out_indices=None , **kwargs , ):
"""simple docstring"""
super().__init__(**kwargs )
self.backbone = backbone
self.num_channels = num_channels
self.features_only = features_only
self.use_pretrained_backbone = use_pretrained_backbone
self.use_timm_backbone = True
self.out_indices = out_indices if out_indices is not None else (-1,)
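# Hedged usage sketch for the config above (the "resnet50" backbone id is an assumption;
# any timm model name would work):
# config = TimmBackboneConfig(backbone="resnet50", out_indices=(1, 2, 3, 4))
# assert config.use_timm_backbone and config.out_indices == (1, 2, 3, 4)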
| 651 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {
"""configuration_gpt_neox_japanese""": ["""GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoXJapaneseConfig"""],
"""tokenization_gpt_neox_japanese""": ["""GPTNeoXJapaneseTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_gpt_neox_japanese"] = [
"""GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoXJapaneseForCausalLM""",
"""GPTNeoXJapaneseLayer""",
"""GPTNeoXJapaneseModel""",
"""GPTNeoXJapanesePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
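# Hedged, simplified sketch of what `_LazyModule` does (not the actual transformers
# implementation): attribute access resolves the defining submodule lazily, so
# torch-dependent modeling code is only imported when one of its symbols is first touched.
import importlib
import types
class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported name back to the submodule that defines it
        self._class_to_module = {
            cls: module for module, classes in import_structure.items() for cls in classes
        }
    def __getattr__(self, name):
        module = importlib.import_module("." + self._class_to_module[name], self.__name__)
        return getattr(module, name)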
| 71 | 0 |
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester ( unittest.TestCase ):
def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , ):
size = size if size is not None else {"height": 18, "width": 18}
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.image_size = image_size
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_resize = do_resize
self.size = size
self.do_normalize = do_normalize
def prepare_image_processor_dict( self ):
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.88_66_44_36_34_03_32_03, 0.66_18_82_93_69_54_49_83, 0.38_91_74_64_01_78_68_04],
[-0.60_42_55_91_46_88_11_04, -0.0_22_95_00_88_60_52_84_69, 0.54_23_79_73_69_00_32_96],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
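# Hedged illustration of what the clusters above are for: ImageGPT color-quantizes each
# (normalized) pixel to the index of its nearest cluster center, so the `input_ids`
# produced by the processor are integers in [0, n_clusters).
import numpy as np
def nearest_cluster_ids(pixels, clusters):
    # pixels: (n, 3) array in [-1, 1]; clusters: (k, 3) array of cluster centers
    distances = ((pixels[:, None, :] - clusters[None, :, :]) ** 2).sum(axis=-1)
    return distances.argmin(axis=1)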
@require_torch
@require_vision
class ImageGPTImageProcessingTest ( ImageProcessingSavingTestMixin , unittest.TestCase ):
image_processing_class = ImageGPTImageProcessor if is_vision_available() else None
def setUp( self ):
self.image_processor_tester = ImageGPTImageProcessingTester(self )
@property
def image_processor_dict( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase_ ( self ):
image_processor = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(image_processor , """clusters""" ) )
self.assertTrue(hasattr(image_processor , """do_resize""" ) )
self.assertTrue(hasattr(image_processor , """size""" ) )
self.assertTrue(hasattr(image_processor , """do_normalize""" ) )
def UpperCamelCase_ ( self ):
image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
def UpperCamelCase_ ( self ):
image_processor = self.image_processing_class(**self.image_processor_dict )
obj = json.loads(image_processor.to_json_string() )
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(value , obj[key] ) )
else:
self.assertEqual(obj[key] , value )
def UpperCamelCase_ ( self ):
image_processor_first = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
json_file_path = os.path.join(tmpdirname , """image_processor.json""" )
image_processor_first.to_json_file(json_file_path )
image_processor_second = self.image_processing_class.from_json_file(json_file_path ).to_dict()
image_processor_first = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(value , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , value )
def UpperCamelCase_ ( self ):
image_processor_first = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(tmpdirname )
image_processor_second = self.image_processing_class.from_pretrained(tmpdirname ).to_dict()
image_processor_first = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(value , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , value )
@unittest.skip("""ImageGPT requires clusters at initialization""" )
def UpperCamelCase_ ( self ):
pass
def prepare_images() -> List[Any]:
dataset = load_dataset("""hf-internal-testing/fixtures_image_utils""" , split="""test""" )
image_1 = Image.open(dataset[4]["""file"""] )
image_2 = Image.open(dataset[5]["""file"""] )
images = [image_1, image_2]
return images
@require_vision
@require_torch
class a_ ( unittest.TestCase ):
@slow
def UpperCamelCase_ ( self ):
image_processing = ImageGPTImageProcessor.from_pretrained("""openai/imagegpt-small""" )
images = prepare_images()
# test non-batched
encoding = image_processing(images[0] , return_tensors="""pt""" )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (1, 1_024) )
expected_ids = [306, 191, 191]
self.assertEqual(encoding.input_ids[0, :3].tolist() , expected_ids )
# test batched
encoding = image_processing(images , return_tensors="""pt""" )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (2, 1_024) )
expected_ids = [303, 13, 13]
self.assertEqual(encoding.input_ids[1, -3:].tolist() , expected_ids ) | 287 |
'''simple docstring'''
import heapq
def greedy_min_vertex_cover( graph: dict ) -> set[int]:
"""simple docstring"""
queue: list[list] = []
# for each node and its adjacency list, add them and the rank of the node to the queue
# using the heapq module, the queue will be filled like a priority queue
# heapq works with a min priority queue, so -1 * len(v) is used to build it
for key, value in graph.items():
# O(log(n))
heapq.heappush(queue , [-1 * len(value ), (key, value)] )
# chosen_vertices = set of chosen vertices
chosen_vertices = set()
# while the queue isn't empty and there are still edges
# (queue[0][0] is the rank of the node with max rank)
while queue and queue[0][0] != 0:
# extract the vertex with max rank from the queue and add it to chosen_vertices
argmax = heapq.heappop(queue )[1][0]
chosen_vertices.add(argmax )
# remove all arcs adjacent to argmax
for elem in queue:
# if the vertex has no adjacent nodes, skip it
if elem[0] == 0:
continue
# if argmax is reachable from elem,
# remove argmax from elem's adjacency list and update its rank
if argmax in elem[1][1]:
index = elem[1][1].index(argmax )
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(queue )
return chosen_vertices
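# Worked trace (added for clarity, using the example graph defined below): vertices 2 and
# 3 both start with degree 3; heapq breaks the tie on the (key, value) payload, so vertex
# 2 is popped first, its incident edges are deleted from the remaining adjacency lists,
# the ranks are re-heapified, and the loop continues until no queued vertex has edges left.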
if __name__ == "__main__":
import doctest
doctest.testmod()
graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
| 71 | 0 |
'''simple docstring'''
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class UMTaModelTester :
"""simple docstring"""
def __init__( self , parent , vocab_size=99 , batch_size=13 , encoder_seq_length=7 , decoder_seq_length=9 , is_training=True , use_attention_mask=True , use_labels=False , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , d_ff=37 , relative_attention_num_buckets=8 , dropout_rate=0.1 , initializer_factor=0.002 , eos_token_id=1 , pad_token_id=0 , decoder_start_token_id=0 , scope=None , decoder_layers=None , ):
'''simple docstring'''
self.parent = parent
self.batch_size = batch_size
self.encoder_seq_length = encoder_seq_length
self.decoder_seq_length = decoder_seq_length
# For common tests
self.seq_length = self.decoder_seq_length
self.is_training = is_training
self.use_attention_mask = use_attention_mask
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.d_ff = d_ff
self.relative_attention_num_buckets = relative_attention_num_buckets
self.dropout_rate = dropout_rate
self.initializer_factor = initializer_factor
self.eos_token_id = eos_token_id
self.pad_token_id = pad_token_id
self.decoder_start_token_id = decoder_start_token_id
self.scope = None
self.decoder_layers = decoder_layers
def _lowerCAmelCase ( self ):
'''simple docstring'''
return TaConfig.from_pretrained("""google/umt5-base""" )
def prepare_inputs_dict( self , config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
'''simple docstring'''
if attention_mask is None:
attention_mask = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
head_mask = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=torch_device )
if decoder_head_mask is None:
decoder_head_mask = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=torch_device )
if cross_attn_head_mask is None:
cross_attn_head_mask = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=torch_device )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : List[Any] = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
a_ : str = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_lenth and which in turn results in
# position_ids being off by num_pad_tokens in past input
a_ : Tuple = input_ids.clamp(self.pad_token_id + 1 )
a_ : List[str] = decoder_input_ids.clamp(self.pad_token_id + 1 )
a_ : Any = self.get_config()
a_ : Union[str, Any] = config.num_attention_heads
a_ : List[Any] = self.prepare_inputs_dict(_snake_case , _snake_case , _snake_case )
return config, input_dict
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Union[str, Any] = self.prepare_config_and_inputs()
return config, inputs_dict
def _lowerCAmelCase ( self ):
'''simple docstring'''
return TaConfig(
vocab_size=1_66 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def _lowerCAmelCase ( self ):
'''simple docstring'''
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def create_and_check_model( self , config , input_ids , decoder_input_ids , attention_mask , decoder_attention_mask , lm_labels , ):
'''simple docstring'''
model = UMTaModel(config=config )
model.to(torch_device )
model.eval()
result = model(
input_ids=input_ids , decoder_input_ids=decoder_input_ids , attention_mask=attention_mask , decoder_attention_mask=decoder_attention_mask , )
result = model(input_ids=input_ids , decoder_input_ids=decoder_input_ids )
decoder_output = result.last_hidden_state
decoder_past = result.past_key_values
encoder_output = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(decoder_past ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def create_and_check_decoder_model_past( self , config , input_ids , decoder_input_ids , attention_mask , decoder_attention_mask , lm_labels , ):
'''simple docstring'''
model = UMTaModel(config=config ).get_decoder().to(torch_device ).eval()
# first forward pass
outputs = model(input_ids , use_cache=True )
outputs_use_cache_conf = model(input_ids )
outputs_no_past = model(input_ids , use_cache=False )
self.parent.assertTrue(len(outputs ) == len(outputs_use_cache_conf ) )
self.parent.assertTrue(len(outputs ) == len(outputs_no_past ) + 1 )
output, past_key_values = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
next_tokens = ids_tensor((self.batch_size, 1) , config.vocab_size )
# append to next input_ids and
next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
output_from_no_past = model(next_input_ids )["last_hidden_state"]
output_from_past = model(next_tokens , past_key_values=past_key_values )["last_hidden_state"]
# select random slice
random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1E-3 ) )
def create_and_check_model_fpaa_forward( self , config , input_dict , ):
'''simple docstring'''
model = UMTaModel(config=config ).to(torch_device ).half().eval()
output = model(**input_dict )["last_hidden_state"]
self.parent.assertFalse(torch.isnan(output ).any().item() )
@require_torch
class UMTaModelTest ( ModelTesterMixin ,GenerationTesterMixin ,PipelineTesterMixin ,unittest.TestCase ):
"""simple docstring"""
a_ = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
a_ = (UMTaForConditionalGeneration,) if is_torch_available() else ()
a_ = (
{
"conversational": UMTaForConditionalGeneration,
"feature-extraction": UMTaModel,
"summarization": UMTaForConditionalGeneration,
"text2text-generation": UMTaForConditionalGeneration,
"translation": UMTaForConditionalGeneration,
"question-answering": UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
a_ = True
a_ = False
a_ = False
a_ = True
a_ = True
# The small UMT5 model needs higher percentages for CPU/MP tests
a_ = [0.8, 0.9]
def setUp( self ):
'''simple docstring'''
self.model_tester = UMTaModelTester(self )
@unittest.skip("""Test has a segmentation fault on torch 1.8.0""" )
def _lowerCAmelCase ( self ):
'''simple docstring'''
config_and_inputs = self.model_tester.prepare_config_and_inputs()
model = UMTaModel(config_and_inputs[0] ).to(torch_device )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
model , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , f'''{tmpdirname}/t5_test.onnx''' , export_params=True , opset_version=9 , input_names=["""input_ids""", """decoder_input_ids"""] , )
@unittest.skipIf(torch_device == """cpu""" , """Cant do half precision""" )
def _lowerCAmelCase ( self ):
'''simple docstring'''
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*config_and_inputs )
def _lowerCAmelCase ( self ):
'''simple docstring'''
attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
config_and_inputs = self.model_tester.prepare_config_and_inputs()
config = config_and_inputs[0]
model = UMTaForConditionalGeneration(config ).eval()
model.to(torch_device )
head_masking = {
"head_mask": torch.zeros(config.num_layers , config.num_heads , device=torch_device ),
"decoder_head_mask": torch.zeros(config.num_decoder_layers , config.num_heads , device=torch_device ),
"cross_attn_head_mask": torch.zeros(config.num_decoder_layers , config.num_heads , device=torch_device ),
}
for attn_name, (name, mask) in zip(attention_names , head_masking.items() ):
head_masks = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
head_masks["decoder_head_mask"] = torch.ones(
config.num_decoder_layers , config.num_heads , device=torch_device )
out = model.generate(
config_and_inputs[1]["""input_ids"""] , num_beams=1 , max_length=3 , output_attentions=True , return_dict_in_generate=True , **head_masks , )
# We check the state of decoder_attentions and cross_attentions just from the last step
attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip("""Does not work on the tiny model as we keep hitting edge cases.""" )
def _lowerCAmelCase ( self ):
'''simple docstring'''
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
@unittest.skip(
"""Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged""" )
def _lowerCAmelCase ( self ):
'''simple docstring'''
model = UMTaForConditionalGeneration.from_pretrained("""google/umt5-small""" , return_dict=True ).to(torch_device )
tokenizer = AutoTokenizer.from_pretrained("""google/umt5-small""" , use_fast=False , legacy=False )
input_text = [
"Bonjour monsieur <extra_id_0> bien <extra_id_1>.",
"No se como puedo <extra_id_0>.",
"This is the reason why we <extra_id_0> them.",
"The <extra_id_0> walks in <extra_id_1>, seats",
"A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.",
]
input_ids = tokenizer(input_text , return_tensors="""pt""" , padding=True ).input_ids
# fmt: off
EXPECTED_IDS = torch.tensor(
[
[ 3_85_30, 21_07_03, 25_62_99, 14_10, 25_62_98, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 8_26, 3_21, 6_71, 2_59_22, 25_62_99, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 14_60, 3_39, 3_12, 1_90_14, 1_06_20, 7_58, 25_62_99, 23_55,2_74, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 5_17, 25_62_99, 1_48_69, 2_81, 3_01, 25_62_98, 2_75, 11_99_83,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 3_20, 25_62_99, 1_48_69, 2_81, 22_34, 2_89, 22_75, 3_33,6_13_91, 2_89, 25_62_98, 5_43, 25_62_97, 16_87_14, 3_29, 25_62_96,2_74, 1],
] )
# fmt: on
torch.testing.assert_allclose(input_ids , EXPECTED_IDS )
generated_ids = model.generate(input_ids.to(torch_device ) )
EXPECTED_FILLING = [
"<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>",
"<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
]
filling = tokenizer.batch_decode(generated_ids )
self.assertEqual(filling , EXPECTED_FILLING )
| 577 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
_lowerCamelCase = logging.get_logger(__name__)
def make_batched( videos ) -> List[List[ImageInput]]:
"""simple docstring"""
if isinstance(videos , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(videos , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(videos ):
return [[videos]]
raise ValueError(F'''Could not make batched video from {videos}''' )
class VivitImageProcessor (BaseImageProcessor):
model_input_names = ["pixel_values"]
def __init__( self ,do_resize = True ,size = None ,resample = PILImageResampling.BILINEAR ,do_center_crop = True ,crop_size = None ,do_rescale = True ,rescale_factor = 1 / 2_55 ,offset = True ,do_normalize = True ,image_mean = None ,image_std = None ,**kwargs ,):
super().__init__(**kwargs )
size = size if size is not None else {"shortest_edge": 2_56}
size = get_size_dict(size ,default_to_square=False )
crop_size = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
crop_size = get_size_dict(crop_size ,param_name="crop_size" )
self.do_resize = do_resize
self.size = size
self.do_center_crop = do_center_crop
self.crop_size = crop_size
self.resample = resample
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.offset = offset
self.do_normalize = do_normalize
self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
def resize( self ,image ,size ,resample = PILImageResampling.BILINEAR ,data_format = None ,**kwargs ,):
size = get_size_dict(size ,default_to_square=False )
if "shortest_edge" in size:
output_size = get_resize_output_image_size(image ,size["shortest_edge"] ,default_to_square=False )
elif "height" in size and "width" in size:
output_size = (size["height"], size["width"])
else:
raise ValueError(f'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
return resize(image ,size=output_size ,resample=resample ,data_format=data_format ,**kwargs )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case = None ,**_snake_case ,):
UpperCAmelCase_ : Dict = get_size_dict(_snake_case )
if "height" not in size or "width" not in size:
raise ValueError(f'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' )
return center_crop(_snake_case ,size=(size["height"], size["width"]) ,data_format=_snake_case ,**_snake_case )
def rescale( self ,image ,scale ,offset = True ,data_format = None ,**kwargs ,):
image = image.astype(np.float32 )
if offset:
image = image - (scale / 2)
return rescale(image ,scale=scale ,data_format=data_format ,**kwargs )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ,_snake_case = None ,**_snake_case ,):
return normalize(_snake_case ,mean=_snake_case ,std=_snake_case ,data_format=_snake_case ,**_snake_case )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = ChannelDimension.FIRST ,):
if do_resize and size is None or resample is None:
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
if offset and not do_rescale:
raise ValueError("For offset, do_rescale must also be set to True." )
# All transformations expect numpy arrays.
UpperCAmelCase_ : Optional[int] = to_numpy_array(_snake_case )
if do_resize:
UpperCAmelCase_ : Dict = self.resize(image=_snake_case ,size=_snake_case ,resample=_snake_case )
if do_center_crop:
UpperCAmelCase_ : Optional[Any] = self.center_crop(_snake_case ,size=_snake_case )
if do_rescale:
UpperCAmelCase_ : Union[str, Any] = self.rescale(image=_snake_case ,scale=_snake_case ,offset=_snake_case )
if do_normalize:
UpperCAmelCase_ : Any = self.normalize(image=_snake_case ,mean=_snake_case ,std=_snake_case )
UpperCAmelCase_ : Any = to_channel_dimension_format(_snake_case ,_snake_case )
return image
def UpperCamelCase__ ( self ,_snake_case ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = ChannelDimension.FIRST ,**_snake_case ,):
UpperCAmelCase_ : Tuple = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ : str = resample if resample is not None else self.resample
UpperCAmelCase_ : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase_ : Tuple = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ : List[Any] = offset if offset is not None else self.offset
UpperCAmelCase_ : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase_ : int = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase_ : int = image_std if image_std is not None else self.image_std
UpperCAmelCase_ : Dict = size if size is not None else self.size
UpperCAmelCase_ : int = get_size_dict(_snake_case ,default_to_square=_snake_case )
UpperCAmelCase_ : List[Any] = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase_ : int = get_size_dict(_snake_case ,param_name="crop_size" )
if not valid_images(_snake_case ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
UpperCAmelCase_ : Any = make_batched(_snake_case )
UpperCAmelCase_ : Dict = [
[
self._preprocess_image(
image=_snake_case ,do_resize=_snake_case ,size=_snake_case ,resample=_snake_case ,do_center_crop=_snake_case ,crop_size=_snake_case ,do_rescale=_snake_case ,rescale_factor=_snake_case ,offset=_snake_case ,do_normalize=_snake_case ,image_mean=_snake_case ,image_std=_snake_case ,data_format=_snake_case ,)
for img in video
]
for video in videos
]
UpperCAmelCase_ : List[str] = {"pixel_values": videos}
return BatchFeature(data=_snake_case ,tensor_type=_snake_case )
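# Hedged usage sketch for the processor above (argument values are assumptions):
# import numpy as np
# processor = VivitImageProcessor()
# video = [np.random.randint(0, 256, (360, 640, 3), dtype=np.uint8) for _ in range(8)]
# pixel_values = processor(video, return_tensors="np")["pixel_values"]
# # expected shape: (1, 8, 3, 224, 224) -> (batch, frames, channels, crop_h, crop_w)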
| 71 | 0 |
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class CanineTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
tokenizer_class = CanineTokenizer
test_rust_tokenizer = False
def setUp( self ) -> None:
"""simple docstring"""
super().setUp()
tokenizer = CanineTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def canine_tokenizer( self ):
"""simple docstring"""
return CanineTokenizer.from_pretrained("""google/canine-s""" )
def _lowercase ( self : int , **__lowerCamelCase : str ) -> Dict:
"""simple docstring"""
UpperCAmelCase = self.tokenizer_class.from_pretrained(self.tmpdirname , **_snake_case )
UpperCAmelCase = 1_0_2_4
return tokenizer
@require_torch
def _lowercase ( self : int ) -> List[str]:
"""simple docstring"""
tokenizer = self.canine_tokenizer
src_text = ["Life is like a box of chocolates.", "You never know what you're gonna get."]
# fmt: off
expected_src_tokens = [5_7_3_4_4, 7_6, 1_0_5, 1_0_2, 1_0_1, 3_2, 1_0_5, 1_1_5, 3_2, 1_0_8, 1_0_5, 1_0_7, 1_0_1, 3_2, 9_7, 3_2, 9_8, 1_1_1, 1_2_0, 3_2, 1_1_1, 1_0_2, 3_2, 9_9, 1_0_4, 1_1_1, 9_9, 1_1_1, 1_0_8, 9_7, 1_1_6, 1_0_1, 1_1_5, 4_6, 5_7_3_4_5, 0, 0, 0, 0]
# fmt: on
batch = tokenizer(src_text , padding=True , return_tensors="""pt""" )
self.assertIsInstance(batch , BatchEncoding )
result = list(batch.input_ids.numpy()[0] )
self.assertListEqual(expected_src_tokens , result )
self.assertEqual((2, 3_9) , batch.input_ids.shape )
self.assertEqual((2, 3_9) , batch.attention_mask.shape )
@require_torch
def _lowercase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase = self.canine_tokenizer
UpperCAmelCase = ["Once there was a man.", "He wrote a test in HuggingFace Tranformers."]
UpperCAmelCase = tokenizer(_snake_case , padding=_snake_case , return_tensors="""pt""" )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn("""input_ids""" , _snake_case )
self.assertIn("""attention_mask""" , _snake_case )
self.assertIn("""token_type_ids""" , _snake_case )
@require_torch
def _lowercase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = self.canine_tokenizer
UpperCAmelCase = [
"What's the weater?",
"It's about 25 degrees.",
]
UpperCAmelCase = tokenizer(
text_target=_snake_case , max_length=3_2 , padding="""max_length""" , truncation=_snake_case , return_tensors="""pt""" )
self.assertEqual(3_2 , targets["""input_ids"""].shape[1] )
def _lowercase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
UpperCAmelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
self.assertNotEqual(tokenizer.model_max_length , 4_2 )
# Now let's start the test
UpperCAmelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
UpperCAmelCase = tempfile.mkdtemp()
UpperCAmelCase = " He is very happy, UNwant\u00E9d,running"
UpperCAmelCase = tokenizer.encode(_snake_case , add_special_tokens=_snake_case )
tokenizer.save_pretrained(_snake_case )
UpperCAmelCase = tokenizer.__class__.from_pretrained(_snake_case )
UpperCAmelCase = after_tokenizer.encode(_snake_case , add_special_tokens=_snake_case )
self.assertListEqual(_snake_case , _snake_case )
shutil.rmtree(_snake_case )
UpperCAmelCase = self.get_tokenizers(model_max_length=4_2 )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
UpperCAmelCase = tempfile.mkdtemp()
UpperCAmelCase = " He is very happy, UNwant\u00E9d,running"
UpperCAmelCase = tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
UpperCAmelCase = chr(0Xe007 )
additional_special_tokens.append(_snake_case )
tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} )
UpperCAmelCase = tokenizer.encode(_snake_case , add_special_tokens=_snake_case )
tokenizer.save_pretrained(_snake_case )
UpperCAmelCase = tokenizer.__class__.from_pretrained(_snake_case )
UpperCAmelCase = after_tokenizer.encode(_snake_case , add_special_tokens=_snake_case )
self.assertListEqual(_snake_case , _snake_case )
self.assertIn(_snake_case , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 4_2 )
UpperCAmelCase = tokenizer.__class__.from_pretrained(_snake_case , model_max_length=4_3 )
self.assertEqual(tokenizer.model_max_length , 4_3 )
shutil.rmtree(_snake_case )
def _lowercase ( self : int ) -> Any:
"""simple docstring"""
UpperCAmelCase = self.get_tokenizers(do_lower_case=_snake_case )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
UpperCAmelCase = self.get_clean_sequence(_snake_case )
# a special token for Canine can be defined as follows:
UpperCAmelCase = 0Xe005
UpperCAmelCase = chr(_snake_case )
tokenizer.add_special_tokens({"""cls_token""": special_token} )
UpperCAmelCase = tokenizer.encode(_snake_case , add_special_tokens=_snake_case )
self.assertEqual(len(_snake_case ) , 1 )
UpperCAmelCase = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=_snake_case )
UpperCAmelCase = tokenizer.encode(_snake_case , add_special_tokens=_snake_case )
UpperCAmelCase = tokenizer.encode(_snake_case , add_special_tokens=_snake_case )
UpperCAmelCase = tokenizer.encode(_snake_case , add_special_tokens=_snake_case )
self.assertEqual(_snake_case , input_encoded + special_token_id )
UpperCAmelCase = tokenizer.decode(_snake_case , skip_special_tokens=_snake_case )
self.assertTrue(special_token not in decoded )
def _lowercase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = self.get_tokenizers(do_lower_case=_snake_case )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
SPECIAL_TOKEN_1 = chr(0Xe005 )
SPECIAL_TOKEN_2 = chr(0Xe006 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=True )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({"""additional_special_tokens""": [SPECIAL_TOKEN_2]} )
token_1 = tokenizer.tokenize(SPECIAL_TOKEN_1 )
token_2 = tokenizer.tokenize(SPECIAL_TOKEN_2 )
self.assertEqual(len(token_1 ) , 1 )
self.assertEqual(len(token_2 ) , 1 )
self.assertEqual(token_1[0] , SPECIAL_TOKEN_1 )
self.assertEqual(token_2[0] , SPECIAL_TOKEN_2 )
@require_tokenizers
def _lowercase ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase = self.get_tokenizers(do_lower_case=_snake_case )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# a special token for Canine can be defined as follows:
UpperCAmelCase = 0Xe006
UpperCAmelCase = chr(_snake_case )
UpperCAmelCase = AddedToken(_snake_case , lstrip=_snake_case )
tokenizer.add_special_tokens({"""additional_special_tokens""": [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(_snake_case )
tokenizer.from_pretrained(_snake_case )
def _lowercase ( self : str ) -> List[str]:
"""simple docstring"""
UpperCAmelCase = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_snake_case )
with open(os.path.join(_snake_case , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file:
UpperCAmelCase = json.load(_snake_case )
with open(os.path.join(_snake_case , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file:
UpperCAmelCase = json.load(_snake_case )
# a special token for Canine can be defined as follows:
UpperCAmelCase = 0Xe006
UpperCAmelCase = chr(_snake_case )
UpperCAmelCase = [new_token_a]
UpperCAmelCase = [new_token_a]
with open(os.path.join(_snake_case , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(_snake_case , _snake_case )
with open(os.path.join(_snake_case , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(_snake_case , _snake_case )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
UpperCAmelCase = tokenizer_class.from_pretrained(_snake_case , extra_ids=0 )
self.assertIn(_snake_case , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
UpperCAmelCase = 0Xe007
UpperCAmelCase = chr(_snake_case )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
UpperCAmelCase = [AddedToken(_snake_case , lstrip=_snake_case )]
UpperCAmelCase = tokenizer_class.from_pretrained(
_snake_case , additional_special_tokens=_snake_case , extra_ids=0 )
self.assertIn(_snake_case , tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def _lowercase ( self : Any ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase = self.get_tokenizers(do_lower_case=_snake_case )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
UpperCAmelCase = "hello world"
if self.space_between_special_tokens:
UpperCAmelCase = "[CLS] hello world [SEP]"
else:
UpperCAmelCase = input
UpperCAmelCase = tokenizer.encode(_snake_case , add_special_tokens=_snake_case )
UpperCAmelCase = tokenizer.decode(_snake_case , spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(_snake_case , [output, output.lower()] )
def _lowercase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
UpperCAmelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
UpperCAmelCase = [
"bos_token",
"eos_token",
"unk_token",
"sep_token",
"pad_token",
"cls_token",
"mask_token",
]
UpperCAmelCase = "a"
UpperCAmelCase = ord(_snake_case )
for attr in attributes_list:
setattr(_snake_case , attr + """_id""" , _snake_case )
self.assertEqual(getattr(_snake_case , _snake_case ) , _snake_case )
self.assertEqual(getattr(_snake_case , attr + """_id""" ) , _snake_case )
setattr(_snake_case , attr + """_id""" , _snake_case )
self.assertEqual(getattr(_snake_case , _snake_case ) , _snake_case )
self.assertEqual(getattr(_snake_case , attr + """_id""" ) , _snake_case )
setattr(_snake_case , """additional_special_tokens_ids""" , [] )
self.assertListEqual(getattr(_snake_case , """additional_special_tokens""" ) , [] )
self.assertListEqual(getattr(_snake_case , """additional_special_tokens_ids""" ) , [] )
UpperCAmelCase = 0Xe006
UpperCAmelCase = chr(_snake_case )
setattr(_snake_case , """additional_special_tokens_ids""" , [additional_special_token_id] )
self.assertListEqual(getattr(_snake_case , """additional_special_tokens""" ) , [additional_special_token] )
self.assertListEqual(getattr(_snake_case , """additional_special_tokens_ids""" ) , [additional_special_token_id] )
def _lowercase ( self : int ) -> Tuple:
"""simple docstring"""
pass
def _lowercase ( self : Dict ) -> Any:
"""simple docstring"""
pass
def _lowercase ( self : int ) -> Optional[int]:
"""simple docstring"""
pass
def _lowercase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
pass
def _lowercase ( self : List[Any] ) -> Any:
"""simple docstring"""
pass
def _lowercase ( self : Any ) -> int:
"""simple docstring"""
pass
def _lowercase ( self : int ) -> int:
"""simple docstring"""
pass
def _lowercase ( self : str ) -> Optional[int]:
"""simple docstring"""
pass
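# Hedged sketch of Canine's character-level encoding, consistent with the ids asserted in
# the batched test above (CLS = 0xE000 = 57344, SEP = 0xE001 = 57345; every other id is
# just the character's Unicode code point):
def canine_style_ids(text):
    return [0xE000] + [ord(char) for char in text] + [0xE001]
assert canine_style_ids("Hi") == [57344, 72, 105, 57345]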
| 377 |
'''simple docstring'''
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader (AbstractDatasetReader):
def __init__( self ,_snake_case ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = False ,_snake_case = False ,_snake_case = None ,_snake_case = None ,**_snake_case ,):
super().__init__(
_snake_case ,split=_snake_case ,features=_snake_case ,cache_dir=_snake_case ,keep_in_memory=_snake_case ,streaming=_snake_case ,num_proc=_snake_case ,**_snake_case ,)
UpperCAmelCase_ : Tuple = field
UpperCAmelCase_ : List[Any] = path_or_paths if isinstance(_snake_case ,_snake_case ) else {self.split: path_or_paths}
UpperCAmelCase_ : Optional[int] = Json(
cache_dir=_snake_case ,data_files=_snake_case ,features=_snake_case ,field=_snake_case ,**_snake_case ,)
def read( self ):
# Build iterable dataset
if self.streaming:
dataset = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
download_config = None
download_mode = None
verification_mode = None
base_path = None
self.builder.download_and_prepare(
download_config=download_config ,download_mode=download_mode ,verification_mode=verification_mode ,base_path=base_path ,num_proc=self.num_proc ,)
dataset = self.builder.as_dataset(
split=self.split ,verification_mode=verification_mode ,in_memory=self.keep_in_memory )
return dataset
class JsonDatasetWriter :
def __init__( self ,_snake_case ,_snake_case ,_snake_case = None ,_snake_case = None ,**_snake_case ,):
if num_proc is not None and num_proc <= 0:
raise ValueError(f'''num_proc {num_proc} must be an integer > 0.''' )
UpperCAmelCase_ : int = dataset
UpperCAmelCase_ : Union[str, Any] = path_or_buf
UpperCAmelCase_ : str = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
UpperCAmelCase_ : Dict = num_proc
UpperCAmelCase_ : Optional[Any] = "utf-8"
UpperCAmelCase_ : Optional[int] = to_json_kwargs
def write( self ):
_ = self.to_json_kwargs.pop("path_or_buf" ,None )
orient = self.to_json_kwargs.pop("orient" ,"records" )
lines = self.to_json_kwargs.pop("lines" ,True if orient == "records" else False )
index = self.to_json_kwargs.pop("index" ,False if orient in ["split", "table"] else True )
compression = self.to_json_kwargs.pop("compression" ,None )
if compression not in [None, "infer", "gzip", "bz2", "xz"]:
raise NotImplementedError(f'''`datasets` currently does not support {compression} compression''' )
if isinstance(self.path_or_buf ,(str, bytes, os.PathLike) ):
with fsspec.open(self.path_or_buf ,"wb" ,compression=compression ) as buffer:
written = self._write(file_obj=buffer ,orient=orient ,lines=lines ,index=index ,**self.to_json_kwargs )
else:
if compression:
raise NotImplementedError(
f'''The compression parameter is not supported when writing to a buffer, but compression={compression}'''
" was passed. Please provide a local path instead." )
written = self._write(
file_obj=self.path_or_buf ,orient=orient ,lines=lines ,index=index ,**self.to_json_kwargs )
return written
def _batch_json( self ,args ):
offset, orient, lines, index, to_json_kwargs = args
batch = query_table(
table=self.dataset.data ,key=slice(offset ,offset + self.batch_size ) ,indices=self.dataset._indices ,)
json_str = batch.to_pandas().to_json(
path_or_buf=None ,orient=orient ,lines=lines ,index=index ,**to_json_kwargs )
if not json_str.endswith("\n" ):
json_str += "\n"
return json_str.encode(self.encoding )
def _write( self ,file_obj ,orient ,lines ,index ,**to_json_kwargs ,):
written = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 ,len(self.dataset ) ,self.batch_size ) ,unit="ba" ,disable=not logging.is_progress_bar_enabled() ,desc="Creating json from Arrow format" ,):
json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
written += file_obj.write(json_str )
else:
num_rows , batch_size = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for json_str in logging.tqdm(
pool.imap(
self._batch_json ,[(offset, orient, lines, index, to_json_kwargs) for offset in range(0 ,num_rows ,batch_size )] ,) ,total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size ,unit="ba" ,disable=not logging.is_progress_bar_enabled() ,desc="Creating json from Arrow format" ,):
written += file_obj.write(json_str )
return written
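# Hedged usage sketch for the reader/writer pair above (the public wrappers are assumed
# to delegate to these classes in this version of `datasets`):
# from datasets import Dataset
# ds = Dataset.from_dict({"a": [1, 2, 3]})
# ds.to_json("out.jsonl")               # JSON Lines via JsonDatasetWriter
# ds2 = Dataset.from_json("out.jsonl")  # back again via JsonDatasetReader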
| 71 | 0 |
"""simple docstring"""
UNIVERSAL_GAS_CONSTANT = 8.314_4598  # J / (mol * K)
def rms_speed_of_molecule( temperature , molar_mass ):
if temperature < 0:
raise Exception("""Temperature cannot be less than 0 K""" )
if molar_mass <= 0:
raise Exception("""Molar mass cannot be less than or equal to 0 kg/mol""" )
else:
# v_rms = sqrt(3 * R * T / M), with T in kelvin and M in kg/mol
return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example: nitrogen gas (N2) at 300 K; the molar mass must be in kg/mol for SI output
temperature = 300
molar_mass = 0.028
vrms = rms_speed_of_molecule(temperature, molar_mass)
print(f"""Vrms of Nitrogen gas at 300 K is {vrms} m/s""")
| 155 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _snake_case (metaclass=DummyObject):
_backends = ["speech"]
def __init__( self ,*args ,**kwargs ):
requires_backends(self ,["speech"] )
class _snake_case (metaclass=DummyObject):
_backends = ["speech"]
def __init__( self ,*args ,**kwargs ):
requires_backends(self ,["speech"] )
| 71 | 0 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"google/pix2struct-textcaps-base": (
"https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
),
}
class Pix2StructTextConfig ( PretrainedConfig ):
'''simple docstring'''
model_type = "pix2struct_text_model"
keys_to_ignore_at_inference = ["past_key_values"]
attribute_map = {
"hidden_size": "hidden_size",
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self , SCREAMING_SNAKE_CASE__=5_02_44 , SCREAMING_SNAKE_CASE__=7_68 , SCREAMING_SNAKE_CASE__=64 , SCREAMING_SNAKE_CASE__=20_48 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=32 , SCREAMING_SNAKE_CASE__=1_28 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=1E-6 , SCREAMING_SNAKE_CASE__=1.0 , SCREAMING_SNAKE_CASE__="gelu_new" , SCREAMING_SNAKE_CASE__=0 , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=0 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=True , **SCREAMING_SNAKE_CASE__ , ):
'''simple docstring'''
snake_case: Dict = vocab_size
snake_case: List[str] = hidden_size
snake_case: Optional[int] = d_kv
snake_case: List[str] = d_ff
snake_case: Optional[Any] = num_layers
snake_case: int = num_heads
snake_case: List[Any] = relative_attention_num_buckets
snake_case: str = relative_attention_max_distance
snake_case: Any = dropout_rate
snake_case: Any = layer_norm_epsilon
snake_case: List[str] = initializer_factor
snake_case: Tuple = use_cache
snake_case: Union[str, Any] = eos_token_id
snake_case: List[Any] = decoder_start_token_id
# for backwards compatibility
snake_case: List[Any] = dense_act_fn
super().__init__(
pad_token_id=_snake_case , eos_token_id=_snake_case , decoder_start_token_id=_snake_case , tie_word_embeddings=_snake_case , is_decoder=_snake_case , **_snake_case , )
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        '''simple docstring'''
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get('model_type') == "pix2struct":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, 'model_type') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")

        return cls.from_dict(config_dict, **kwargs)
class Pix2StructVisionConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "pix2struct_vision_model"

    def __init__(self, hidden_size=768, patch_embed_hidden_size=768, d_ff=2048, d_kv=64, num_hidden_layers=12, num_attention_heads=12, dense_act_fn="gelu_new", layer_norm_eps=1e-6, dropout_rate=0.0, attention_dropout=0.0, initializer_range=1e-10, initializer_factor=1.0, seq_len=4096, relative_attention_num_buckets=32, relative_attention_max_distance=128, **kwargs, ):
        '''simple docstring'''
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        '''simple docstring'''
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get('model_type') == "pix2struct":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, 'model_type') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")

        return cls.from_dict(config_dict, **kwargs)
class Pix2StructConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "pix2struct"
    is_composition = True

    def __init__(self, text_config=None, vision_config=None, initializer_factor=1.0, initializer_range=0.02, is_vqa=False, tie_word_embeddings=False, is_encoder_decoder=True, **kwargs, ):
        '''simple docstring'''
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)

        if text_config is None:
            text_config = {}
            logger.info('text_config is None. Initializing the Pix2StructTextConfig with default values.')

        if vision_config is None:
            vision_config = {}
            logger.info('vision_config is None. Initializing the Pix2StructVisionConfig with default values.')

        self.text_config = Pix2StructTextConfig(**text_config)
        self.vision_config = Pix2StructVisionConfig(**vision_config)

        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id

        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range

        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range

        self.is_vqa = is_vqa
    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        '''simple docstring'''
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
 | 329 |
'''simple docstring'''
def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> tuple[float, float]:
    """simple docstring"""
    if not len(equation1) == len(equation2) == 3:
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        else:
            raise ValueError("No solution. (Inconsistent system)")
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
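# Minimal demo (added; not in the original file): x + 2y = 3 and 2x + y = 3
# intersect at x = y = 1.
if __name__ == "__main__":
    assert cramers_rule_2x2([1, 2, 3], [2, 1, 3]) == (1.0, 1.0)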
| 71 | 0 |
from collections.abc import Callable
class Heap:
    '''simple docstring'''

    def __init__(self, key=None):
        """simple docstring"""
        # Stores actual heap items.
        self.arr = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        self.key = key or (lambda x: x)
    def _parent(self, i):
        """simple docstring"""
        return int((i - 1) / 2) if i > 0 else None

    def _left(self, i):
        """simple docstring"""
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None

    def _right(self, i):
        """simple docstring"""
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None
    def _swap(self, i, j):
        """simple docstring"""
        # First update the indexes of the items in index map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]

    def _cmp(self, i, j):
        """simple docstring"""
        return self.arr[i][1] < self.arr[j][1]
    def _get_valid_parent(self, i):
        """simple docstring"""
        left = self._left(i)
        right = self._right(i)
        valid_parent = i

        if left is not None and not self._cmp(left, valid_parent):
            valid_parent = left
        if right is not None and not self._cmp(right, valid_parent):
            valid_parent = right

        return valid_parent

    def _heapify_up(self, index):
        """simple docstring"""
        parent = self._parent(index)
        while parent is not None and not self._cmp(index, parent):
            self._swap(index, parent)
            index, parent = parent, self._parent(parent)

    def _heapify_down(self, index):
        """simple docstring"""
        valid_parent = self._get_valid_parent(index)
        while valid_parent != index:
            self._swap(index, valid_parent)
            index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)
    def update_item(self, item, item_value):
        """simple docstring"""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)

    def delete_item(self, item):
        """simple docstring"""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change- so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index)
            self._heapify_down(index)

    def insert_item(self, item, item_value):
        """simple docstring"""
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)

    def get_top(self):
        """simple docstring"""
        return self.arr[0] if self.size else None

    def extract_top(self):
        """simple docstring"""
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple
def test_heap() -> None:
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
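    # Minimal usage sketch (added; relies on the restored Heap class above, which
    # behaves as a max-heap on the stored value).
    h = Heap()
    h.insert_item("a", 3)
    h.insert_item("b", 10)
    h.insert_item("c", 5)
    assert h.get_top() == ["b", 10]
    h.delete_item("b")
    assert h.get_top() == ["c", 5]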
| 637 |
'''simple docstring'''

from statistics import mean, stdev


def normalization(data: list, ndigits: int = 3) -> list:
    """simple docstring"""
    x_min = min(data)
    x_max = max(data)
    # normalize data
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]


def standardization(data: list, ndigits: int = 3) -> list:
    """simple docstring"""
    mu = mean(data)
    sigma = stdev(data)
    # standardize data
    return [round((x - mu) / (sigma), ndigits) for x in data]
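# Quick demo (added; not in the original file): min-max scaling pins the extremes
# to 0 and 1, while z-score standardization centres the sample mean at 0.
if __name__ == "__main__":
    assert normalization([2, 4, 6]) == [0.0, 0.5, 1.0]
    assert standardization([2, 4, 6]) == [-1.0, 0.0, 1.0]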
| 71 | 0 |
'''simple docstring'''
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at")


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"
    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(
        image, x_coordinate, y_coordinate, center)
    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)

    assert lbp_image.any()
| 422 |
'''simple docstring'''
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """simple docstring"""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt", )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    """simple docstring"""
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)

        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs), )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions, references=references, )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f'''epoch {epoch}:''', eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()
def main():
    """simple docstring"""
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.", )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
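# Minimal sketch (added; a simplification of what `find_executable_batch_size`
# does, not the real accelerate implementation): retry the wrapped training
# function, halving the batch size whenever it raises an out-of-memory error.
def find_executable_batch_size_sketch(starting_batch_size=128):
    def decorator(fn):
        def wrapper(*args, **kwargs):
            batch_size = starting_batch_size
            while batch_size > 0:
                try:
                    return fn(batch_size, *args, **kwargs)
                except RuntimeError as exc:
                    if "out of memory" in str(exc).lower():
                        batch_size //= 2  # shrink and retry
                    else:
                        raise
            raise RuntimeError("No executable batch size found; reached zero.")

        return wrapper

    return decorator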
| 71 | 0 |
'''simple docstring'''


def binary_exponentiation(a, n, mod):
    '''simple docstring'''
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        # n // 2 keeps n integral (float division would lose precision for large n)
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod


# a prime number
p = 701

a = 1_000_000_000
b = 10

# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
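# Sanity check (added): by Fermat's little theorem, b**(p-2) is b's modular
# inverse for prime p, and the recursion agrees with Python's built-in pow().
assert binary_exponentiation(b, p - 2, p) == pow(b, p - 2, p)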
| 330 |
'''simple docstring'''

from __future__ import annotations


def prime_factors(n: int) -> list[int]:
    """simple docstring"""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
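    # Example (added): 360 = 2^3 * 3^2 * 5.
    assert prime_factors(360) == [2, 2, 2, 3, 3, 5]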
| 71 | 0 |
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNet2DModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0
class LDMSuperResolutionPipeline(DiffusionPipeline):
    '''simple docstring'''

    def __init__(self, vqvae: VQModel, unet: UNet2DModel, scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler], ):
        """simple docstring"""
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)
    @torch.no_grad()
    def __call__(self, image=None, batch_size=1, num_inference_steps=100, eta=0.0, generator=None, output_type="pil", return_dict=True, ):
        """simple docstring"""
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f'`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}')

        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)

        height, width = image.shape[-2:]

        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype

        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)

        image = image.to(device=self.device, dtype=latents_dtype)

        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
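# Hypothetical usage sketch (added; the checkpoint name is an assumption, not
# taken from this file):
#   pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
#   upscaled = pipe(image=low_res_image, num_inference_steps=100, eta=1.0).images[0]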
| 5 |
'''simple docstring'''
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_CITATION = """\
@misc{wu2016googles,
title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
_DESCRIPTION = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""
_KWARGS_DESCRIPTION = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
'google_bleu': google_bleu score
Examples:
Example 1:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.44
Example 2:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.61
Example 3:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results[\"google_bleu\"], 2))
0.53
Example 4:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results[\"google_bleu\"], 2))
0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" ,id="token" ) ,id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" ,id="token" ) ,id="sequence" ) ,id="references" ),
} ) ,)
    def _compute(self, predictions, references, min_len=1, max_len=4, ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len)
        }
| 71 | 0 |
def combination_util(arr, n, r, index, data, i):
    '''simple docstring'''
    # Current combination is ready to be printed, print it
    if index == r:
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)
# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def print_combination(arr, n, r):
    '''simple docstring'''
    # A temporary array to store all combinations one by one
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)
if __name__ == "__main__":
# Driver code to check the function above
A_ : List[str] = [10, 20, 30, 40, 50]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
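# Cross-check (added): the recursion above enumerates C(5, 3) = 10 combinations,
# the same ones itertools.combinations yields in lexicographic order.
if __name__ == "__main__":
    from itertools import combinations

    assert len(list(combinations([10, 20, 30, 40, 50], 3))) == 10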
| 303 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import Seq2SeqTrainer
from seq2seq_training_args import Seq2SeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
    Seq2SeqDataCollator,
    Seq2SeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
_lowerCamelCase = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"})
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether to freeze the encoder."})
    freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."})
@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."})
    task: Optional[str] = field(
        default="summarization", metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"}, )
    max_source_length: Optional[int] = field(
        default=1024, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    max_target_length: Optional[int] = field(
        default=128, metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    val_max_target_length: Optional[int] = field(
        default=142, metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        }, )
    test_max_target_length: Optional[int] = field(
        default=142, metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."})
    n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."})
    n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."})
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."})
    tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."})
    eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."})
    ignore_pad_token_for_loss: bool = field(
        default=True, metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."}, )
def handle_metrics(split, metrics, output_dir):
    """simple docstring"""
    logger.info(f'''***** {split} metrics *****''')
    for key in sorted(metrics.keys()):
        logger.info(f'''  {key} = {metrics[key]}''')
    save_json(metrics, os.path.join(output_dir, f'''{split}_results.json'''))
def main():
    """simple docstring"""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))

    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    check_output_dir(training_args)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED), training_args.fp16, )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("Training/evaluation parameters %s" , _SCREAMING_SNAKE_CASE )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )

    extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
    for p in extra_model_params:
        if getattr(training_args, p, None):
            assert hasattr(config, p), f'''({config.__class__.__name__}) doesn\'t have a `{p}` attribute'''
            setattr(config, p, getattr(training_args, p))

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_args.model_name_or_path, from_tf=".ckpt" in model_args.model_name_or_path, config=config, cache_dir=model_args.cache_dir, )
# use task specific params
    use_task_specific_params(model, data_args.task)

    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams

    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer, MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang)

    if model_args.freeze_embeds:
        freeze_embeds(model)
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder())
        assert_all_frozen(model.get_encoder())

    dataset_class = Seq2SeqDataset
    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer, type_path="train", data_dir=data_args.data_dir, n_obs=data_args.n_train, max_target_length=data_args.max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or "", )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer, type_path="val", data_dir=data_args.data_dir, n_obs=data_args.n_val, max_target_length=data_args.val_max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or "", )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer, type_path="test", data_dir=data_args.data_dir, n_obs=data_args.n_test, max_target_length=data_args.test_max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or "", )
        if training_args.do_predict
        else None
    )

    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task, tokenizer) if training_args.predict_with_generate else None
    )
    trainer = Seq2SeqTrainer(
        model=model, args=training_args, data_args=data_args, train_dataset=train_dataset, eval_dataset=eval_dataset, data_collator=Seq2SeqDataCollator(
            tokenizer, data_args, model.config.decoder_start_token_id, training_args.tpu_num_cores), compute_metrics=compute_metrics_fn, tokenizer=tokenizer, )

    all_metrics = {}
    # Training
    if training_args.do_train:
        logger.info("*** Train ***")

        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None)
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train

        trainer.save_model()  # this also saves the tokenizer

        if trainer.is_world_process_zero():
            handle_metrics("train", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate(metric_key_prefix="val")
        metrics["val_n_objs"] = data_args.n_val
        metrics["val_loss"] = round(metrics["val_loss"], 4)

        if trainer.is_world_process_zero():
            handle_metrics("val", metrics, training_args.output_dir)
            all_metrics.update(metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")

        test_output = trainer.predict(test_dataset=test_dataset, metric_key_prefix="test")
        metrics = test_output.metrics
        metrics["test_n_objs"] = data_args.n_test

        if trainer.is_world_process_zero():
            metrics["test_loss"] = round(metrics["test_loss"], 4)
            handle_metrics("test", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True)
                test_preds = lmap(str.strip, test_preds)
                write_txt_file(test_preds, os.path.join(training_args.output_dir, "test_generations.txt"))

    if trainer.is_world_process_zero():
        save_json(all_metrics, os.path.join(training_args.output_dir, "all_results.json"))

    return all_metrics
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
| 71 | 0 |
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
    ClapTextConfig,
    ClapTextModelWithProjection,
    RobertaTokenizer,
    SpeechT5HifiGan,
    SpeechT5HifiGanConfig,
)

from diffusers import (
    AudioLDMPipeline,
    AutoencoderKL,
    DDIMScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class AudioLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    pipeline_class = AudioLDMPipeline
    params = TEXT_TO_AUDIO_PARAMS
    batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "num_waveforms_per_prompt",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ])
    def get_dummy_components(self):
        """simple docstring"""
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=(32, 64), class_embed_type="simple_projection", projection_class_embeddings_input_dim=32, class_embeddings_concat=True, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=1, out_channels=1, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = ClapTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, projection_dim=32, )
        text_encoder = ClapTextModelWithProjection(text_encoder_config)
        tokenizer = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta", model_max_length=77)

        vocoder_config = SpeechT5HifiGanConfig(
            model_in_dim=8, sampling_rate=16000, upsample_initial_channel=16, upsample_rates=[2, 2], upsample_kernel_sizes=[4, 4], resblock_kernel_sizes=[3, 7], resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]], normalize_before=False, )
        vocoder = SpeechT5HifiGan(vocoder_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "vocoder": vocoder,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        """simple docstring"""
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
        }
        return inputs
    def test_audioldm_ddim(self):
        """simple docstring"""
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(**inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033])

        assert np.abs(audio_slice - expected_slice).max() < 1e-2
    def test_audioldm_prompt_embeds(self):
        """simple docstring"""
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = audioldm_pipe.tokenizer(
            prompt, padding="max_length", max_length=audioldm_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt", )
        text_inputs = text_inputs["input_ids"].to(torch_device)

        prompt_embeds = audioldm_pipe.text_encoder(
            text_inputs, )
        prompt_embeds = prompt_embeds.text_embeds
        # additional L_2 normalization over each hidden-state
        prompt_embeds = F.normalize(prompt_embeds, dim=-1)

        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2
    def test_audioldm_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = audioldm_pipe.tokenizer(p, padding="max_length", max_length=audioldm_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt")
            text_inputs = text_inputs["input_ids"].to(torch_device)

            text_embeds = audioldm_pipe.text_encoder(text_inputs)
            text_embeds = text_embeds.text_embeds
            # additional L_2 normalization over each hidden-state
            text_embeds = F.normalize(text_embeds, dim=-1)

            embeds.append(text_embeds)

        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2
    def test_audioldm_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "egg cracking"
        output = audioldm_pipe(**inputs, negative_prompt=negative_prompt)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array([-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032])

        assert np.abs(audio_slice - expected_slice).max() < 1e-2
    def test_audioldm_num_waveforms_per_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = "A hammer hitting a wooden surface"

        # test num_waveforms_per_prompt=1 (default)
        audios = audioldm_pipe(prompt, num_inference_steps=2).audios

        assert audios.shape == (1, 256)

        # test num_waveforms_per_prompt=1 (default) for batch of prompts
        batch_size = 2
        audios = audioldm_pipe([prompt] * batch_size, num_inference_steps=2).audios

        assert audios.shape == (batch_size, 256)

        # test num_waveforms_per_prompt for single prompt
        num_waveforms_per_prompt = 2
        audios = audioldm_pipe(prompt, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt).audios

        assert audios.shape == (num_waveforms_per_prompt, 256)

        # test num_waveforms_per_prompt for batch of prompts
        batch_size = 2
        audios = audioldm_pipe([prompt] * batch_size, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt).audios

        assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
    def test_audioldm_audio_length_in_s(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        vocoder_sampling_rate = audioldm_pipe.vocoder.config.sampling_rate

        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(audio_length_in_s=0.016, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.016

        output = audioldm_pipe(audio_length_in_s=0.032, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.032
    def test_audioldm_vocoder_model_in_dim(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = ["hey"]

        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        assert audio_shape == (1, 256)

        config = audioldm_pipe.vocoder.config
        config.model_in_dim *= 2
        audioldm_pipe.vocoder = SpeechT5HifiGan(config).to(torch_device)
        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
        assert audio_shape == (1, 256)
    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(test_mean_pixel_difference=False)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False)
@slow
class AudioLDMPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 8, 128, 16))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 2.5,
        }
        return inputs
    def test_audioldm(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        inputs["num_inference_steps"] = 25
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[77230:77240]
        expected_slice = np.array([-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315])
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 1e-2

    def test_audioldm_lms(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe.scheduler = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[27780:27790]
        expected_slice = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212])
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 3e-2
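
# Illustrative usage sketch (not part of the test suite): generating a waveform with the
# same "cvssp/audioldm" checkpoint the slow tests above exercise. Step count and audio
# length are illustrative; at 16 kHz, audio_length_in_s=5.12 matches the 81920 samples
# asserted above. Outputs depend on hardware and seeds.
if __name__ == "__main__":
    pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
    pipe = pipe.to(torch_device)
    waveform = pipe("A hammer hitting a wooden surface", num_inference_steps=10, audio_length_in_s=5.12).audios[0]
    # `waveform` is a 1-D numpy array sampled at pipe.vocoder.config.sampling_rate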
| 651 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
    import tensorflow as tf

    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class TFBlenderbotModelTester:
    config_cls = BlenderbotConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates)
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
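
# A quick, self-contained illustration (hypothetical values) of what the helper above
# produces: the encoder attention mask is 1 wherever input_ids differs from the pad id.
# Requires TensorFlow, hence the __main__ guard.
if __name__ == "__main__":
    demo_pad_token_id = 1
    demo_ids = tf.constant([[5, 6, 7, demo_pad_token_id, demo_pad_token_id]])
    demo_mask = tf.cast(tf.math.not_equal(demo_ids, demo_pad_token_id), tf.int8)
    print(demo_mask.numpy())  # -> [[1 1 1 0 0]]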
@require_tf
class TFBlenderbotModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotForConditionalGeneration,
            "feature-extraction": TFBlenderbotModel,
            "summarization": TFBlenderbotForConditionalGeneration,
            "text2text-generation": TFBlenderbotForConditionalGeneration,
            "translation": TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbot400MIntegrationTests(unittest.TestCase):
    src_text = ["My friends are cool but they eat too many carbs."]
    model_name = "facebook/blenderbot-400M-distill"

    @cached_property
    def tokenizer(self):
        return BlenderbotTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(model_inputs.input_ids)
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
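
# Hedged end-to-end sketch of the same checkpoint outside the test harness. The exact
# reply can vary across model revisions; the string asserted above is one observed output.
if __name__ == "__main__":
    demo_tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-400M-distill")
    demo_model = TFAutoModelForSeq2SeqLM.from_pretrained("facebook/blenderbot-400M-distill")
    demo_batch = demo_tokenizer(["My friends are cool but they eat too many carbs."], return_tensors="tf")
    demo_reply_ids = demo_model.generate(demo_batch.input_ids)
    print(demo_tokenizer.batch_decode(demo_reply_ids.numpy(), skip_special_tokens=True)[0])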
| 71 | 0 |
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
    from transformers import (
        TFBertModel,
        TFCLIPVisionModel,
        TFDeiTModel,
        TFRobertaModel,
        TFVisionTextDualEncoderModel,
        TFViTModel,
        VisionTextDualEncoderConfig,
    )

if is_vision_available():
    from PIL import Image

    from transformers import VisionTextDualEncoderProcessor
def to_2tuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_tf
class TFVisionTextDualEncoderMixin:
    def get_vision_text_model(self, config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass

    def check_model_from_pretrained_configs(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        model = TFVisionTextDualEncoderModel(config)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))

    def check_vision_text_dual_encoder_model(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_vision_text_dual_encoder_from_pretrained(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0].numpy()

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = TFVisionTextDualEncoderModel.from_pretrained(tmpdirname)

            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0].numpy()
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-5)

    def check_vision_text_output_attention(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True)

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(text_attentions[0].shape[-3:], (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]))

    def assert_almost_equals(self, a, b, tol):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")

    def test_vision_text_dual_encoder_model(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_model(**inputs_dict)

    def test_model_from_pretrained_configs(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)

    def test_save_load(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)

    def test_vision_text_output_attention(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)

    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()

        outputs = model_2(**inputs)
        out_2 = outputs[0].numpy()

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = TFVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0].numpy()
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)
@require_tf
class TFViTBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained("hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-random-bert")
        batch_size = 13
        pixel_values = floats_tensor([batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size])
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFViTModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = TFViTModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class TFDeiTRobertaModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        # DeiT repo doesn't have TF weights, but we don't actually use the weights at all so let's
        # just reinitialize it.
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained("Rocketknight1/tiny-random-deit-tf", "hf-internal-testing/tiny-random-roberta")
        batch_size = 13
        pixel_values = floats_tensor([batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size])
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def check_vision_text_output_attention(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True)

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 2
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(text_attentions[0].shape[-3:], (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]))

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFDeiTModel(vision_config, name="vision_model")
        text_model = TFRobertaModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = TFDeiTModelTester(self)
        bert_model_tester = TFRobertaModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class TFCLIPVisionBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained("Rocketknight1/tiny-random-clip-tf", "hf-internal-testing/tiny-random-bert")
        batch_size = 13
        pixel_values = floats_tensor([batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size])
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFCLIPVisionModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = TFCLIPVisionModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
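
# For intuition about the integration check below: its expected image-to-text logits are
# [[1.2284727, 0.3104122]]; a plain softmax (a sketch, not part of the original tests)
# turns them into probabilities of roughly [[0.71, 0.29]], i.e. the "una foto di un gatto"
# caption wins for the cat image.
def _softmax_demo():
    logits_per_image = np.array([[1.2284727, 0.3104122]])
    return np.exp(logits_per_image) / np.exp(logits_per_image).sum(axis=-1, keepdims=True)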
@require_vision
@require_tf
class TFVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = TFVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", logit_scale_init_value=1.0, from_pt=True)
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np")

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(outputs.logits_per_text.shape, (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]))

        expected_logits = np.array([[1.2284727, 0.3104122]])

        self.assertTrue(np.allclose(outputs.logits_per_image.numpy(), expected_logits, atol=1e-3))
 | 287 |
'''simple docstring'''
from numpy import exp, pi, sqrt


def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> float:
    """
    Evaluate the Gaussian (normal) probability density function at x.

    >>> gaussian(1)
    0.24197072451914337
    """
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
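
    # Quick numeric sanity check: the peak of the standard normal density, at x = mu = 0,
    # is 1/sqrt(2*pi) ≈ 0.3989.
    from math import isclose

    assert isclose(gaussian(0), 0.3989422804014327)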
| 71 | 0 |
'''simple docstring'''
import unittest
from transformers import DebertaV2Tokenizer, DebertaV2TokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class DebertaV2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaV2Tokenizer
    rust_tokenizer_class = DebertaV2TokenizerFast
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, unk_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "[PAD]")
        self.assertEqual(len(vocab_keys), 30001)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30000)
    def test_do_lower_case(self):
        # fmt: off
        sequence = " \tHeLLo!how \n Are yoU? "
        tokens_target = ["▁hello", "!", "how", "▁are", "▁you", "?"]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
    def test_sentencepiece_tokenize_and_convert_tokens_to_string(self):
        pass

    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
    def test_sentencepiece_tokenize_and_decode(self):
        pass

    def test_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_do_lower_case_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_split_by_punct_false(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_false_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_false_split_by_punct_false(self):
        # fmt: off
        sequence = " \tHeLLo!how \n Are yoU? "
        tokens_target = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_rust_and_python_full_tokenizers(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        sequence = "This is a test"
        ids_target = [13, 1, 4398, 25, 21, 1289]
        tokens_target = ["▁", "T", "his", "▁is", "▁a", "▁test"]
        back_tokens_target = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, keep_accents=True)
        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, keep_accents=True)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)

        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        ids_target = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
        tokens_target = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
        back_tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)
    def test_sequence_builders(self):
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id], encoded_sentence)
        self.assertEqual(
            [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [tokenizer.sep_token_id],
            encoded_pair,
        )

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
a_ : Union[str, Any] = {"input_ids": [[1, 3_98_67, 36, 1_93_90, 4_86, 27, 3_50_52, 8_14_36, 18, 6_06_85, 12_25, 7, 3_50_52, 8_14_36, 18, 93_67, 1_68_99, 18, 1_59_37, 53, 5_94, 7_73, 18, 1_62_87, 3_04_65, 36, 1_59_37, 6, 4_11_39, 38, 3_69_79, 6_07_63, 1_91, 6, 3_41_32, 99, 6, 5_05_38, 3_90, 4_32_30, 6, 3_41_32, 27_79, 2_08_50, 14, 6_99, 10_72, 11_94, 36, 3_82, 1_09_01, 53, 7, 6_99, 10_72, 20_84, 36, 2_04_22, 6_30, 53, 19, 1_05, 30_49, 18_96, 10_53, 1_68_99, 15_06, 11, 3_79_78, 42_43, 7, 12_37, 3_18_69, 2_00, 1_65_66, 6_54, 6, 3_50_52, 8_14_36, 7, 5_56_30, 1_35_93, 4, 2], [1, 26, 1_50_11, 13, 6_67, 8, 10_53, 18, 2_36_11, 12_37, 7_23_56, 1_28_20, 34, 10_41_34, 12_09, 35, 1_33_13, 66_27, 21, 2_02, 3_47, 7, 1_64, 23_99, 11, 46, 44_85, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 12_32, 28_64, 1_57_85, 1_49_51, 1_05, 5, 85_81, 12_50, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(expected_encoding=a_, model_name="microsoft/deberta-v2-xlarge", revision="ad6e42c1532ddf3a15c39246b63f5559d558b670")
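
# A minimal sketch of the special-token layout that test_sequence_builders asserts above
# (DeBERTa-v2 uses BERT-style [CLS]/[SEP] framing); run directly to check it against the
# SentencePiece fixture:
if __name__ == "__main__":
    demo_tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB)
    demo_ids = demo_tokenizer.encode("sequence builders", add_special_tokens=False)
    framed = demo_tokenizer.build_inputs_with_special_tokens(demo_ids)  # [CLS] + ids + [SEP]
    assert framed[0] == demo_tokenizer.cls_token_id and framed[-1] == demo_tokenizer.sep_token_id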
| 577 |
'''simple docstring'''
from typing import Optional

from torch import nn

from .transformer_2d import Transformer2DModel, Transformer2DModelOutput


class DualTransformer2DModel(nn.Module):
    """Dual transformer wrapper that combines two `Transformer2DModel`s for mixed inference."""

    def __init__(self, num_attention_heads: int = 16, attention_head_dim: int = 88, in_channels: Optional[int] = None, num_layers: int = 1, dropout: float = 0.0, norm_num_groups: int = 32, cross_attention_dim: Optional[int] = None, attention_bias: bool = False, sample_size: Optional[int] = None, num_vector_embeds: Optional[int] = None, activation_fn: str = "geglu", num_embeds_ada_norm: Optional[int] = None):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(
                    num_attention_heads=num_attention_heads,
                    attention_head_dim=attention_head_dim,
                    in_channels=in_channels,
                    num_layers=num_layers,
                    dropout=dropout,
                    norm_num_groups=norm_num_groups,
                    cross_attention_dim=cross_attention_dim,
                    attention_bias=attention_bias,
                    sample_size=sample_size,
                    num_vector_embeds=num_vector_embeds,
                    activation_fn=activation_fn,
                    num_embeds_ada_norm=num_embeds_ada_norm,
                )
                for _ in range(2)
            ]
        )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(self, hidden_states, encoder_hidden_states, timestep=None, attention_mask=None, cross_attention_kwargs=None, return_dict: bool = True):
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states, encoder_hidden_states=condition_state, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, return_dict=False
            )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return Transformer2DModelOutput(sample=output_states)
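

def _demo_mixing_configuration() -> "DualTransformer2DModel":
    """A hedged sketch (hypothetical helper, not part of the library) showing the knobs a
    pipeline sets on DualTransformer2DModel before calling it; the tiny constructor values
    are illustrative only."""
    model = DualTransformer2DModel(num_attention_heads=2, attention_head_dim=8, in_channels=4, norm_num_groups=2)
    model.mix_ratio = 0.4  # blend 40% of transformer 1's residual with 60% of transformer 2's
    model.condition_lengths = [77, 257]  # e.g. text tokens first, image-embedding tokens second
    model.transformer_index_for_condition = [1, 0]
    return model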
| 71 | 0 |
def decimal_to_fraction(decimal) -> tuple[int, int]:
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError("Please enter a valid number")
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    else:
        number_of_frac_digits = len(str(decimal).split(".")[1])
        numerator = int(decimal * (10**number_of_frac_digits))
        denominator = 10**number_of_frac_digits
        # reduce the fraction with the Euclidean algorithm for the gcd
        divisor, dividend = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator), int(denominator)


if __name__ == "__main__":
    print(f"{decimal_to_fraction(2) = }")
    print(f"{decimal_to_fraction(89.0) = }")
    print(f"{decimal_to_fraction('67') = }")
    print(f"{decimal_to_fraction('45.0') = }")
    print(f"{decimal_to_fraction(1.5) = }")
    print(f"{decimal_to_fraction('6.25') = }")
    try:
        decimal_to_fraction("78td")
    except ValueError:
        print('decimal_to_fraction("78td") raises ValueError: invalid input')
| 377 |
'''simple docstring'''
import json
import sys


def format_json_to_md(input_json_file, output_md_file):
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)

    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]

    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]

        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")

        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)

            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"

            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"

        output_md += [title, lines, value, " "]

    output_md.append("</details>")

    with open(output_md_file, "w", encoding="utf-8") as f:
        f.writelines("\n".join(output_md))


if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]

    format_json_to_md(input_json_file, output_md_file)
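

def _demo_results():
    """A hedged sketch (made-up numbers) of the input shape this script expects:
    {benchmark file -> {metric -> {"new": ..., "old": ..., "diff": ...}}}.
    For this example the emitted table row would read
    "| new / old (diff) | 0.520000 / 0.610000 (-0.090000) |"."""
    return {"benchmarks/inference.json": {"load_time": {"new": 0.52, "old": 0.61, "diff": -0.09}}}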
| 71 | 0 |
"""simple docstring"""
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
logger = logging.get_logger(__name__)


def load_pytorch_checkpoint_in_flax_state_dict(flax_model, pytorch_checkpoint_path, is_sharded, allow_missing_keys=False):
    """Load pytorch checkpoints in a flax model"""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    if not is_sharded:
        pt_path = os.path.abspath(pytorch_checkpoint_path)
        logger.info(f"Loading PyTorch weights from {pt_path}")

        pt_state_dict = torch.load(pt_path, map_location="cpu")
        logger.info(f"PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values()):,} parameters.")

        flax_state_dict = convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model)
    else:
        # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
        flax_state_dict = convert_pytorch_sharded_state_dict_to_flax(pytorch_checkpoint_path, flax_model)
    return flax_state_dict
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix):
    """Rename PyTorch weight names to the corresponding Flax names, reshaping if necessary."""

    def is_key_or_prefix_key_in_dict(key) -> bool:
        """Checks whether a key or its prefixed version is in the given dict."""
        return len(set(random_flax_state_dict) & {key, (model_prefix,) + key}) > 0

    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer mean
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("mean",)
    if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer var
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("var",)
    if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
    if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
    name = None
    if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
        name = pt_tuple_key[-2] + "_g"
    elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
        name = pt_tuple_key[-2] + "_v"
    if name is not None:
        renamed_pt_tuple_key = pt_tuple_key[:-3] + (name,)
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
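
# Small self-contained check of the renaming rules above, using a hypothetical key and
# Flax dict: a 2-D PyTorch "weight" that has no direct Flax counterpart is treated as a
# linear kernel and transposed.
if __name__ == "__main__":
    _flax_dict = {("dense", "kernel"): np.zeros((3, 4))}
    _key, _tensor = rename_key_and_reshape_tensor(("dense", "weight"), np.ones((4, 3)), _flax_dict, "bert")
    assert _key == ("dense", "kernel") and _tensor.shape == (3, 4)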
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model):
    # convert pytorch tensors to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    model_prefix = flax_model.base_model_prefix

    # use params dict if the model contains batch norm layers
    if "params" in flax_model.params:
        flax_model_params = flax_model.params["params"]
    else:
        flax_model_params = flax_model.params
    random_flax_state_dict = flatten_dict(flax_model_params)

    # add batch_stats keys,values to dict
    if "batch_stats" in flax_model.params:
        flax_batch_stats = flatten_dict(flax_model.params["batch_stats"])
        random_flax_state_dict.update(flax_batch_stats)

    flax_state_dict = {}

    load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
        model_prefix in {k.split(".")[0] for k in pt_state_dict.keys()}
    )
    load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
        model_prefix not in {k.split(".")[0] for k in pt_state_dict.keys()}
    )

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        pt_tuple_key = tuple(pt_key.split("."))

        # remove base model prefix if necessary
        has_base_model_prefix = pt_tuple_key[0] == model_prefix
        if load_model_with_head_into_base_model and has_base_model_prefix:
            pt_tuple_key = pt_tuple_key[1:]

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix)

        # add model prefix if necessary
        require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
        if load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key = (model_prefix,) + flax_key

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # add batch stats if the model contains batchnorm layers
        if "batch_stats" in flax_model.params:
            if "mean" in flax_key[-1] or "var" in flax_key[-1]:
                flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                continue
            # remove num_batches_tracked key
            if "num_batches_tracked" in flax_key[-1]:
                flax_state_dict.pop(flax_key, None)
                continue

            # also add unexpected weight so that warning is thrown
            flax_state_dict[("params",) + flax_key] = jnp.asarray(flax_tensor)

        else:
            # also add unexpected weight so that warning is thrown
            flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
def convert_pytorch_sharded_state_dict_to_flax(shard_filenames, flax_model):
    import torch

    # Load the index
    flax_state_dict = {}
    for shard_file in shard_filenames:
        # load using msgpack utils
        pt_state_dict = torch.load(shard_file)
        pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

        model_prefix = flax_model.base_model_prefix

        # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
        if "batch_stats" in flax_model.params:
            flax_model_params = flax_model.params["params"]

            random_flax_state_dict = flatten_dict(flax_model_params)
            random_flax_state_dict.update(flatten_dict(flax_model.params["batch_stats"]))
        else:
            flax_model_params = flax_model.params
            random_flax_state_dict = flatten_dict(flax_model_params)

        load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
            model_prefix in {k.split(".")[0] for k in pt_state_dict.keys()}
        )
        load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
            model_prefix not in {k.split(".")[0] for k in pt_state_dict.keys()}
        )
        # Need to change some parameters name to match Flax names
        for pt_key, pt_tensor in pt_state_dict.items():
            pt_tuple_key = tuple(pt_key.split("."))

            # remove base model prefix if necessary
            has_base_model_prefix = pt_tuple_key[0] == model_prefix
            if load_model_with_head_into_base_model and has_base_model_prefix:
                pt_tuple_key = pt_tuple_key[1:]

            # Correctly rename weight parameters
            flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix)
            # add model prefix if necessary
            require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
            if load_base_model_into_model_with_head and require_base_model_prefix:
                flax_key = (model_prefix,) + flax_key

            if flax_key in random_flax_state_dict:
                if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                    raise ValueError(
                        f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                        f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                    )

            # add batch stats if the model contains batchnorm layers
            if "batch_stats" in flax_model.params:
                if "mean" in flax_key[-1]:
                    flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                    continue
                if "var" in flax_key[-1]:
                    flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                    continue
                # remove num_batches_tracked key
                if "num_batches_tracked" in flax_key[-1]:
                    flax_state_dict.pop(flax_key, None)
                    continue

                # also add unexpected weight so that warning is thrown
                flax_state_dict[("params",) + flax_key] = jnp.asarray(flax_tensor)

            else:
                # also add unexpected weight so that warning is thrown
                flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)
def load_flax_checkpoint_in_pytorch_model(model, flax_checkpoint_path):
    """Load a Flax checkpoint into a PyTorch model."""
    flax_checkpoint_path = os.path.abspath(flax_checkpoint_path)
    logger.info(f"Loading Flax weights from {flax_checkpoint_path}")

    # import correct flax class
    flax_cls = getattr(transformers, "Flax" + model.__class__.__name__)

    # load flax weight dict
    with open(flax_checkpoint_path, "rb") as state_f:
        try:
            flax_state_dict = from_bytes(flax_cls, state_f.read())
        except UnpicklingError:
            raise EnvironmentError(f"Unable to convert {flax_checkpoint_path} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(model, flax_state_dict)
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Load Flax weights into a PyTorch model."""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16, since torch.from_numpy cannot handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    flax_state_dict = flatten_dict(flax_state)
    pt_model_dict = pt_model.state_dict()

    load_model_with_head_into_base_model = (pt_model.base_model_prefix in flax_state) and (
        pt_model.base_model_prefix not in {k.split(".")[0] for k in pt_model_dict.keys()}
    )
    load_base_model_into_model_with_head = (pt_model.base_model_prefix not in flax_state) and (
        pt_model.base_model_prefix in {k.split(".")[0] for k in pt_model_dict.keys()}
    )

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        has_base_model_prefix = flax_key_tuple[0] == pt_model.base_model_prefix
        require_base_model_prefix = ".".join((pt_model.base_model_prefix,) + flax_key_tuple) in pt_model_dict

        # adapt flax_key to prepare for loading from/to base model only
        if load_model_with_head_into_base_model and has_base_model_prefix:
            flax_key_tuple = flax_key_tuple[1:]
        elif load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key_tuple = (pt_model.base_model_prefix,) + flax_key_tuple

        # rename flax weights to PyTorch format
        if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(flax_key_tuple) not in pt_model_dict:
            # conv layer
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple) not in pt_model_dict:
            # linear layer
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
            flax_tensor = flax_tensor.T
        elif flax_key_tuple[-1] in ["scale", "embedding"]:
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)

        # adding batch stats from flax batch norm to pt
        elif "mean" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ("running_mean",)
        elif "var" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ("running_var",)

        if "batch_stats" in flax_state:
            flax_key = ".".join(flax_key_tuple[1:])  # Remove the params/batch_stats header
        else:
            flax_key = ".".join(flax_key_tuple)

        # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
        special_pt_names = {}
        # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
        for key in pt_model_dict:
            key_components = key.split(".")
            name = None
            if key_components[-3::2] == ["parametrizations", "original0"]:
                name = key_components[-2] + "_g"
            elif key_components[-3::2] == ["parametrizations", "original1"]:
                name = key_components[-2] + "_v"
            if name is not None:
                key_components = key_components[:-3] + [name]
                key_to_check = ".".join(key_components)
                special_pt_names[key_to_check] = key

        if flax_key in special_pt_names:
            flax_key = special_pt_names[flax_key]

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)

    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)."
        )
    else:
        logger.warning(f"All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n")

    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference."
        )
    else:
        logger.warning(
            f"All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n"
            "If your task is similar to the task the model of the checkpoint was trained on, "
            f"you can already use {pt_model.__class__.__name__} for predictions without further training."
        )

    return pt_model
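# Illustrative layout check (plain numpy) mirroring the transposes above:
# Flax conv kernels are (H, W, in, out) while PyTorch expects (out, in, H, W),
# and Flax dense kernels are the transpose of PyTorch linear weights.
import numpy as np

_flax_conv = np.zeros((3, 3, 16, 32))  # (H, W, in_ch, out_ch)
assert np.transpose(_flax_conv, (3, 2, 0, 1)).shape == (32, 16, 3, 3)

_flax_dense = np.zeros((128, 64))  # (in_features, out_features)
assert _flax_dense.T.shape == (64, 128)  # PyTorch (out_features, in_features)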
| 155 |
'''simple docstring'''
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/spiece.model""")
@require_sentencepiece
@require_tokenizers
class DebertaVaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaVaTokenizer
    rust_tokenizer_class = DebertaVaTokenizerFast
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, unk_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "[PAD]")
        self.assertEqual(len(vocab_keys), 30001)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30000)
def UpperCamelCase__ ( self ):
# fmt: off
UpperCAmelCase_ : str = " \tHeLLo!how \n Are yoU? "
UpperCAmelCase_ : Union[str, Any] = ["▁hello", "!", "how", "▁are", "▁you", "?"]
# fmt: on
UpperCAmelCase_ : Tuple = DebertaVaTokenizer(_snake_case ,do_lower_case=_snake_case )
UpperCAmelCase_ : Union[str, Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Tuple = DebertaVaTokenizerFast(_snake_case ,do_lower_case=_snake_case )
UpperCAmelCase_ : Any = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
@unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." )
def UpperCamelCase__ ( self ):
pass
@unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." )
def UpperCamelCase__ ( self ):
pass
def UpperCamelCase__ ( self ):
# fmt: off
UpperCAmelCase_ : Optional[int] = "I was born in 92000, and this is falsé."
UpperCAmelCase_ : List[str] = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
UpperCAmelCase_ : List[Any] = DebertaVaTokenizer(_snake_case ,split_by_punct=_snake_case )
UpperCAmelCase_ : Optional[int] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : int = DebertaVaTokenizerFast(_snake_case ,split_by_punct=_snake_case )
UpperCAmelCase_ : Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ):
# fmt: off
UpperCAmelCase_ : Tuple = "I was born in 92000, and this is falsé."
UpperCAmelCase_ : Dict = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
UpperCAmelCase_ : Optional[Any] = DebertaVaTokenizer(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case )
UpperCAmelCase_ : Any = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : List[Any] = DebertaVaTokenizerFast(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case )
UpperCAmelCase_ : Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ):
# fmt: off
UpperCAmelCase_ : Optional[int] = "I was born in 92000, and this is falsé."
UpperCAmelCase_ : Optional[int] = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
# fmt: on
UpperCAmelCase_ : List[Any] = DebertaVaTokenizer(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case )
UpperCAmelCase_ : str = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Optional[Any] = DebertaVaTokenizerFast(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case )
UpperCAmelCase_ : str = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ):
# fmt: off
UpperCAmelCase_ : Optional[int] = "I was born in 92000, and this is falsé."
UpperCAmelCase_ : Optional[Any] = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
UpperCAmelCase_ : List[str] = DebertaVaTokenizer(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case )
UpperCAmelCase_ : Tuple = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Dict = DebertaVaTokenizerFast(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case )
UpperCAmelCase_ : Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ):
# fmt: off
UpperCAmelCase_ : Tuple = " \tHeLLo!how \n Are yoU? "
UpperCAmelCase_ : List[Any] = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"]
# fmt: on
UpperCAmelCase_ : Any = DebertaVaTokenizer(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case )
UpperCAmelCase_ : List[str] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : int = DebertaVaTokenizerFast(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case )
UpperCAmelCase_ : Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[str] = self.get_tokenizer()
UpperCAmelCase_ : Union[str, Any] = self.get_rust_tokenizer()
UpperCAmelCase_ : Dict = "I was born in 92000, and this is falsé."
UpperCAmelCase_ : Any = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
UpperCAmelCase_ : Optional[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Tuple = tokenizer.encode(_snake_case ,add_special_tokens=_snake_case )
UpperCAmelCase_ : int = rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Optional[Any] = self.get_rust_tokenizer()
UpperCAmelCase_ : Union[str, Any] = tokenizer.encode(_snake_case )
UpperCAmelCase_ : List[Any] = rust_tokenizer.encode(_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Any = "This is a test"
UpperCAmelCase_ : Optional[int] = [13, 1, 43_98, 25, 21, 12_89]
UpperCAmelCase_ : Optional[Any] = ["▁", "T", "his", "▁is", "▁a", "▁test"]
UpperCAmelCase_ : List[str] = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]
UpperCAmelCase_ : str = DebertaVaTokenizer(_snake_case ,keep_accents=_snake_case )
UpperCAmelCase_ : List[Any] = DebertaVaTokenizerFast(_snake_case ,keep_accents=_snake_case )
UpperCAmelCase_ : Optional[int] = tokenizer.encode(_snake_case ,add_special_tokens=_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Any = tokenizer.tokenize(_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Union[str, Any] = tokenizer.convert_ids_to_tokens(_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : List[Any] = rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Dict = rust_tokenizer.tokenize(_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : List[str] = rust_tokenizer.convert_ids_to_tokens(_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
# fmt: off
UpperCAmelCase_ : List[str] = "I was born in 92000, and this is falsé."
UpperCAmelCase_ : Optional[int] = [13, 1, 23, 3_86, 19, 5_61, 30_50, 15, 17, 48, 25, 82_56, 18, 1, 9]
UpperCAmelCase_ : str = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
UpperCAmelCase_ : List[str] = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
# fmt: on
UpperCAmelCase_ : List[str] = tokenizer.encode(_snake_case ,add_special_tokens=_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Dict = tokenizer.tokenize(_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : int = tokenizer.convert_ids_to_tokens(_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Optional[int] = rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Optional[int] = rust_tokenizer.tokenize(_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Any = rust_tokenizer.convert_ids_to_tokens(_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Any = DebertaVaTokenizer(_snake_case )
UpperCAmelCase_ : Optional[int] = tokenizer.encode("sequence builders" )
UpperCAmelCase_ : Dict = tokenizer.encode("multi-sequence build" )
UpperCAmelCase_ : Tuple = tokenizer.build_inputs_with_special_tokens(_snake_case )
UpperCAmelCase_ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(_snake_case ,_snake_case )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] ,_snake_case )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] ,_snake_case ,)
@slow
def UpperCamelCase__ ( self ):
# fmt: off
UpperCAmelCase_ : Union[str, Any] = {"input_ids": [[1, 3_98_67, 36, 1_93_90, 4_86, 27, 3_50_52, 8_14_36, 18, 6_06_85, 12_25, 7, 3_50_52, 8_14_36, 18, 93_67, 1_68_99, 18, 1_59_37, 53, 5_94, 7_73, 18, 1_62_87, 3_04_65, 36, 1_59_37, 6, 4_11_39, 38, 3_69_79, 6_07_63, 1_91, 6, 3_41_32, 99, 6, 5_05_38, 3_90, 4_32_30, 6, 3_41_32, 27_79, 2_08_50, 14, 6_99, 10_72, 11_94, 36, 3_82, 1_09_01, 53, 7, 6_99, 10_72, 20_84, 36, 2_04_22, 6_30, 53, 19, 1_05, 30_49, 18_96, 10_53, 1_68_99, 15_06, 11, 3_79_78, 42_43, 7, 12_37, 3_18_69, 2_00, 1_65_66, 6_54, 6, 3_50_52, 8_14_36, 7, 5_56_30, 1_35_93, 4, 2], [1, 26, 1_50_11, 13, 6_67, 8, 10_53, 18, 2_36_11, 12_37, 7_23_56, 1_28_20, 34, 10_41_34, 12_09, 35, 1_33_13, 66_27, 21, 2_02, 3_47, 7, 1_64, 23_99, 11, 46, 44_85, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 12_32, 28_64, 1_57_85, 1_49_51, 1_05, 5, 85_81, 12_50, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_snake_case ,model_name="microsoft/deberta-v2-xlarge" ,revision="ad6e42c1532ddf3a15c39246b63f5559d558b670" ,)
| 71 | 0 |
'''simple docstring'''
import pytest
import datasets
# Import fixture modules as plugins
# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]


def pytest_collection_modifyitems(config, items):
    # Mark tests that are not explicitly "integration" or "unit" as "unit" by default.
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)


def pytest_configure(config):
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")


@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    # Redirect all HF caches into the pytest temp dir so tests stay hermetic.
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))


@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    # Don't count test runs as real dataset downloads.
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)


@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
'''simple docstring'''
def get_highest_set_bit_position(number: int) -> int:
    """Return the 1-based position of the highest set bit (the bit length of the integer)."""
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")

    position = 0
    while number:
        position += 1
        number >>= 1

    return position
if __name__ == "__main__":
    import doctest

    doctest.testmod()
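    # Usage sketch: the helper returns the 1-based position of the highest
    # set bit, i.e. the bit length of the integer (0 when no bit is set).
    assert get_highest_set_bit_position(1) == 1  # 0b1
    assert get_highest_set_bit_position(8) == 4  # 0b1000
    assert get_highest_set_bit_position(0) == 0  # no bits set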
| 71 | 0 |
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
"EAGER",
"AOT_EAGER",
"INDUCTOR",
"NVFUSER",
"AOT_NVFUSER",
"AOT_CUDAGRAPHS",
"OFI",
"FX2TRT",
"ONNXRT",
"IPEX",
]
def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)


def _ask_options(input_text, options=[], convert_value=None, default_choice=0):
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result


def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])


def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])


def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value


def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])


def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])


def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]


class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """A custom formatter that removes the usage line from subcommand help messages."""

    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
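# Illustrative usage (assumes the converters above are in scope): answers come
# back from the menus as strings and are mapped onto accelerate's enum values.
assert _convert_yes_no_to_bool("Yes") is True
assert _convert_yes_no_to_bool("no") is False
assert _convert_mixed_precision("1") == PrecisionType("fp16")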
| 637 |
'''simple docstring'''
from math import factorial
def combinations(n: int, k: int) -> int:
    """Return n choose k, the number of k-element subsets of an n-element set."""
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))
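# Worked example (illustrative): C(52, 5) = 52! / (5! * 47!) = 2,598,960
# distinct five-card hands, matching the demo output below.
assert combinations(52, 5) == 2_598_960
assert combinations(40, 4) == 91_390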
if __name__ == "__main__":
    print(
        "The number of five-card hands possible from a standard",
        f"fifty-two card deck is: {combinations(52, 5)}\n",
    )

    print(
        "If a class of 40 students must be arranged into groups of",
        f"4 for group projects, there are {combinations(40, 4)} ways",
        "to arrange them.\n",
    )

    print(
        "If 10 teams are competing in a Formula One race, there",
        f"are {combinations(10, 3)} ways that first, second and",
        "third place can be awarded.",
    )
| 71 | 0 |
'''simple docstring'''
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
    import os

    from flax.core.frozen_dict import unfreeze
    from flax.traverse_util import flatten_dict

    from transformers import FlaxBertModel

    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8
@require_flax
@is_staging_test
class FlaxModelPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-model-flax")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-model-flax-org")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("test-model-flax", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="test-model-flax")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir, repo_id="test-model-flax", push_to_hub=True, use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("valid_org/test-model-flax-org", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-model-flax-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(
                tmp_dir, repo_id="valid_org/test-model-flax-org", push_to_hub=True, use_auth_token=self._token
            )

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
def check_models_equal(model_1, model_2):
    models_are_equal = True
    flat_params_1 = flatten_dict(model_1.params)
    flat_params_2 = flatten_dict(model_2.params)

    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1e-4:
            models_are_equal = False

    return models_are_equal
@require_flax
class FlaxModelUtilsTest(unittest.TestCase):
    def test_model_from_pretrained_subfolder(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder))

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_subfolder_sharded(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder), max_shard_size="10KB")

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_hub_subfolder(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-subfolder"

        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)

    def test_model_from_pretrained_hub_subfolder_sharded(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-sharded-subfolder"

        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)
| 72 |
'''simple docstring'''
from math import pi, sqrt, tan
def surface_area_cube(side_length: float) -> float:
    if side_length < 0:
        raise ValueError("surface_area_cube() only accepts non-negative values")
    return 6 * side_length**2


def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError("surface_area_cuboid() only accepts non-negative values")
    return 2 * ((length * breadth) + (breadth * height) + (length * height))


def surface_area_sphere(radius: float) -> float:
    if radius < 0:
        raise ValueError("surface_area_sphere() only accepts non-negative values")
    return 4 * pi * radius**2


def surface_area_hemisphere(radius: float) -> float:
    if radius < 0:
        raise ValueError("surface_area_hemisphere() only accepts non-negative values")
    return 3 * pi * radius**2


def surface_area_cone(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cone() only accepts non-negative values")
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)


def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError("surface_area_conical_frustum() only accepts non-negative values")
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)


def surface_area_cylinder(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cylinder() only accepts non-negative values")
    return 2 * pi * radius * (height + radius)


def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError("surface_area_torus() only accepts non-negative values")
    if torus_radius < tube_radius:
        raise ValueError("surface_area_torus() does not support spindle or self intersecting tori")
    return 4 * pow(pi, 2) * torus_radius * tube_radius


def area_rectangle(length: float, width: float) -> float:
    if length < 0 or width < 0:
        raise ValueError("area_rectangle() only accepts non-negative values")
    return length * width


def area_square(side_length: float) -> float:
    if side_length < 0:
        raise ValueError("area_square() only accepts non-negative values")
    return side_length**2


def area_triangle(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError("area_triangle() only accepts non-negative values")
    return (base * height) / 2


def area_triangle_three_sides(side1: float, side2: float, side3: float) -> float:
    # Heron's formula
    if side1 < 0 or side2 < 0 or side3 < 0:
        raise ValueError("area_triangle_three_sides() only accepts non-negative values")
    elif side1 + side2 < side3 or side1 + side3 < side2 or side2 + side3 < side1:
        raise ValueError("Given three sides do not form a triangle")
    semi_perimeter = (side1 + side2 + side3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side1)
        * (semi_perimeter - side2)
        * (semi_perimeter - side3)
    )
    return area


def area_parallelogram(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError("area_parallelogram() only accepts non-negative values")
    return base * height


def area_trapezium(base_1: float, base_2: float, height: float) -> float:
    if base_1 < 0 or base_2 < 0 or height < 0:
        raise ValueError("area_trapezium() only accepts non-negative values")
    return 1 / 2 * (base_1 + base_2) * height


def area_circle(radius: float) -> float:
    if radius < 0:
        raise ValueError("area_circle() only accepts non-negative values")
    return pi * radius**2


def area_ellipse(radius_x: float, radius_y: float) -> float:
    if radius_x < 0 or radius_y < 0:
        raise ValueError("area_ellipse() only accepts non-negative values")
    return pi * radius_x * radius_y


def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError("area_rhombus() only accepts non-negative values")
    return 1 / 2 * diagonal_1 * diagonal_2


def area_reg_polygon(sides: int, length: float) -> float:
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            "area_reg_polygon() only accepts integers greater than or equal to three as number of sides"
        )
    elif length < 0:
        raise ValueError("area_reg_polygon() only accepts non-negative values as length of a side")
    return (sides * length**2) / (4 * tan(pi / sides))
if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)  # verbose so we can see methods missing tests

    print("[DEMO] Areas of various geometric shapes: \n")
    print(f"Rectangle: {area_rectangle(10, 20) = }")
    print(f"Square: {area_square(10) = }")
    print(f"Triangle: {area_triangle(10, 10) = }")
    print(f"Triangle: {area_triangle_three_sides(5, 12, 13) = }")
    print(f"Parallelogram: {area_parallelogram(10, 20) = }")
    print(f"Rhombus: {area_rhombus(10, 20) = }")
    print(f"Trapezium: {area_trapezium(10, 20, 30) = }")
    print(f"Circle: {area_circle(20) = }")
    print(f"Ellipse: {area_ellipse(10, 20) = }")
    print("\nSurface Areas of various geometric shapes: \n")
    print(f"Cube: {surface_area_cube(20) = }")
    print(f"Cuboid: {surface_area_cuboid(10, 20, 30) = }")
    print(f"Sphere: {surface_area_sphere(20) = }")
    print(f"Hemisphere: {surface_area_hemisphere(20) = }")
    print(f"Cone: {surface_area_cone(10, 20) = }")
    print(f"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }")
    print(f"Cylinder: {surface_area_cylinder(10, 20) = }")
    print(f"Torus: {surface_area_torus(20, 10) = }")
    print(f"Equilateral Triangle: {area_reg_polygon(3, 10) = }")
    print(f"Square: {area_reg_polygon(4, 10) = }")
    print(f"Regular Pentagon: {area_reg_polygon(5, 10) = }")
| 72 | 1 |
'''simple docstring'''
deps = {
'''Pillow''': '''Pillow''',
'''accelerate''': '''accelerate>=0.11.0''',
'''compel''': '''compel==0.1.8''',
'''black''': '''black~=23.1''',
'''datasets''': '''datasets''',
'''filelock''': '''filelock''',
'''flax''': '''flax>=0.4.1''',
'''hf-doc-builder''': '''hf-doc-builder>=0.3.0''',
'''huggingface-hub''': '''huggingface-hub>=0.13.2''',
'''requests-mock''': '''requests-mock==1.10.0''',
'''importlib_metadata''': '''importlib_metadata''',
'''invisible-watermark''': '''invisible-watermark''',
'''isort''': '''isort>=5.5.4''',
'''jax''': '''jax>=0.2.8,!=0.3.2''',
'''jaxlib''': '''jaxlib>=0.1.65''',
'''Jinja2''': '''Jinja2''',
'''k-diffusion''': '''k-diffusion>=0.0.12''',
'''torchsde''': '''torchsde''',
'''note_seq''': '''note_seq''',
'''librosa''': '''librosa''',
'''numpy''': '''numpy''',
'''omegaconf''': '''omegaconf''',
'''parameterized''': '''parameterized''',
'''protobuf''': '''protobuf>=3.20.3,<4''',
'''pytest''': '''pytest''',
'''pytest-timeout''': '''pytest-timeout''',
'''pytest-xdist''': '''pytest-xdist''',
'''ruff''': '''ruff>=0.0.241''',
'''safetensors''': '''safetensors''',
'''sentencepiece''': '''sentencepiece>=0.1.91,!=0.1.92''',
'''scipy''': '''scipy''',
'''onnx''': '''onnx''',
'''regex''': '''regex!=2019.12.17''',
'''requests''': '''requests''',
'''tensorboard''': '''tensorboard''',
'''torch''': '''torch>=1.4''',
'''torchvision''': '''torchvision''',
'''transformers''': '''transformers>=4.25.1''',
'''urllib3''': '''urllib3<=2.0.0''',
}
| 72 |
'''simple docstring'''
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
def _A( self ):
super().setUp()
lowercase =BarthezTokenizerFast.from_pretrained('''moussaKam/mbarthez''' )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=snake_case_ )
lowercase =tokenizer
def _A( self ):
lowercase ='''<pad>'''
lowercase =1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case_ ) , snake_case_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case_ ) , snake_case_ )
def _A( self ):
lowercase =list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''<mask>''' )
self.assertEqual(len(snake_case_ ) , 10_11_22 )
def _A( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 10_11_22 )
@require_torch
def _A( self ):
lowercase =['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
lowercase =[0, 57, 30_18, 7_03_07, 91, 2]
lowercase =self.tokenizer(
snake_case_ , max_length=len(snake_case_ ) , padding=snake_case_ , truncation=snake_case_ , return_tensors='''pt''' )
self.assertIsInstance(snake_case_ , snake_case_ )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
lowercase =batch.input_ids.tolist()[0]
self.assertListEqual(snake_case_ , snake_case_ )
def _A( self ):
if not self.test_rust_tokenizer:
return
lowercase =self.get_tokenizer()
lowercase =self.get_rust_tokenizer()
lowercase ='''I was born in 92000, and this is falsé.'''
lowercase =tokenizer.tokenize(snake_case_ )
lowercase =rust_tokenizer.tokenize(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
lowercase =tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
lowercase =rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
lowercase =self.get_rust_tokenizer()
lowercase =tokenizer.encode(snake_case_ )
lowercase =rust_tokenizer.encode(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
@slow
def _A( self ):
# fmt: off
lowercase ={'''input_ids''': [[0, 4_90, 1_43_28, 45_07, 3_54, 47, 4_36_69, 95, 25, 7_81_17, 2_02_15, 1_97_79, 1_90, 22, 4_00, 4, 3_53_43, 8_03_10, 6_03, 86, 2_49_37, 1_05, 3_34_38, 9_47_62, 1_96, 3_96_42, 7, 15, 1_59_33, 1_73, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_05_34, 87, 25, 66, 33_58, 1_96, 5_52_89, 8, 8_29_61, 81, 22_04, 7_52_03, 7, 15, 7_63, 1_29_56, 2_16, 1_78, 1_43_28, 95_95, 13_77, 6_96_93, 7, 4_48, 7_10_21, 1_96, 1_81_06, 14_37, 1_39_74, 1_08, 90_83, 4, 4_93_15, 7, 39, 86, 13_26, 27_93, 4_63_33, 4, 4_48, 1_96, 7_45_88, 7, 4_93_15, 7, 39, 21, 8_22, 3_84_70, 74, 21, 6_67_23, 6_24_80, 8, 2_20_50, 5, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
        # moussaKam/mbarthez is a French model, so we also use French texts.
lowercase =[
'''Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '''
'''utilisé principalement dans le domaine du traitement automatique des langues (TAL).''',
'''À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '''
'''pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '''
'''telles que la traduction et la synthèse de texte.''',
]
self.tokenizer_integration_test_util(
expected_encoding=snake_case_ , model_name='''moussaKam/mbarthez''' , revision='''c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6''' , sequences=snake_case_ , )
| 72 | 1 |
'''simple docstring'''
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 72 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/pix2struct-textcaps-base''': (
'''https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json'''
),
}
class Pix2StructTextConfig(PretrainedConfig):
    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(self, vocab_size=50244, hidden_size=768, d_kv=64, d_ff=2048, num_layers=12, num_heads=12, relative_attention_num_buckets=32, relative_attention_max_distance=128, dropout_rate=0.1, layer_norm_epsilon=1e-6, initializer_factor=1.0, dense_act_fn="gelu_new", decoder_start_token_id=0, use_cache=False, pad_token_id=0, eos_token_id=1, tie_word_embeddings=False, is_decoder=True, **kwargs):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache

        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id

        # for backwards compatibility
        self.dense_act_fn = dense_act_fn

        super().__init__(pad_token_id=pad_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, tie_word_embeddings=tie_word_embeddings, is_decoder=is_decoder, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class Pix2StructVisionConfig(PretrainedConfig):
    model_type = "pix2struct_vision_model"

    def __init__(self, hidden_size=768, patch_embed_hidden_size=768, d_ff=2048, d_kv=64, num_hidden_layers=12, num_attention_heads=12, dense_act_fn="gelu_new", layer_norm_eps=1e-6, dropout_rate=0.0, attention_dropout=0.0, initializer_range=1e-10, initializer_factor=1.0, seq_len=4096, relative_attention_num_buckets=32, relative_attention_max_distance=128, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class Pix2StructConfig(PretrainedConfig):
    model_type = "pix2struct"
    is_composition = True

    def __init__(self, text_config=None, vision_config=None, initializer_factor=1.0, initializer_range=0.02, is_vqa=False, tie_word_embeddings=False, is_encoder_decoder=True, **kwargs):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")

        self.text_config = Pix2StructTextConfig(**text_config)
        self.vision_config = Pix2StructVisionConfig(**vision_config)

        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id

        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range

        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range

        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
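# Usage sketch (illustrative; relies only on the classes defined above):
# compose a Pix2StructConfig from explicit sub-configs, then round-trip it
# through to_dict().
text_cfg = Pix2StructTextConfig(vocab_size=50244)
vision_cfg = Pix2StructVisionConfig(hidden_size=768)
cfg = Pix2StructConfig.from_text_vision_configs(text_cfg, vision_cfg)
assert cfg.to_dict()["text_config"]["vocab_size"] == 50244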
| 72 | 1 |
'''simple docstring'''
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PREFIX = "https://openaipublic.azureedge.net/jukebox/models/"
MODEL_MAPPING = {
'''jukebox-1b-lyrics''': [
'''5b/vqvae.pth.tar''',
'''5b/prior_level_0.pth.tar''',
'''5b/prior_level_1.pth.tar''',
'''1b_lyrics/prior_level_2.pth.tar''',
],
'''jukebox-5b-lyrics''': [
'''5b/vqvae.pth.tar''',
'''5b/prior_level_0.pth.tar''',
'''5b/prior_level_1.pth.tar''',
'''5b_lyrics/prior_level_2.pth.tar''',
],
}
def replace_key(key):
    if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.1.bias", ".conv1d_1.bias")
    elif key.endswith(".model.1.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.1.weight", ".conv1d_1.weight")
    elif key.endswith(".model.3.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.3.bias", ".conv1d_2.bias")
    elif key.endswith(".model.3.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.3.weight", ".conv1d_2.weight")

    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0", "conditioner_blocks")

    if "prime_prior" in key:
        key = key.replace("prime_prior", "encoder")

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb.", ".")

    if key.endswith("k"):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k", ".codebook")
    if "y_emb." in key:
        return key.replace("y_emb.", "metadata_embedding.")

    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb", "embed_tokens")

    if "prime_state_ln" in key:
        return key.replace("prime_state_ln", "encoder.final_layer_norm")
    if ".ln" in key:
        return key.replace(".ln", ".layer_norm")
    if "_ln" in key:
        return key.replace("_ln", "_layer_norm")

    if "prime_state_proj" in key:
        return key.replace("prime_state_proj", "encoder.proj_in")
    if "prime_x_out" in key:
        return key.replace("prime_x_out", "encoder.lm_head")
    if "prior.x_out" in key:
        return key.replace("x_out", "fc_proj_out")
    if "x_emb" in key:
        return key.replace("x_emb", "embed_tokens")

    return key
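# Illustrative spot-checks of the renaming above (hypothetical keys, not taken
# from a real checkpoint):
assert replace_key("vqvae.encoders.0.level_blocks.0.k") == "vqvae.encoders.0.level_blocks.0.codebook"
assert replace_key("prior.x_out.weight") == "prior.fc_proj_out.weight"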
def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_encoder_block_resnet = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_decoder_block_resnet = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)")
    re_prior_cond_resnet = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)")

    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)

        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)

        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)

        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)

        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)

        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)

        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)

        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)

        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
lowercase =re_prior_cond_proj_in.sub(lowercase_ , lowercase_ )
# keep original key
else:
lowercase =original_key
lowercase =replace_key(lowercase_ )
if f'{key_prefix}.{key}' not in model_state_dict or key is None:
print(f'failed converting {original_key} to {key}, does not match' )
        # handle mismatched shape
        elif value.shape != model_state_dict[f'{key_prefix}.{key}'].shape:
            lowercase =model_state_dict[f'{key_prefix}.{key}']
            print(f'{original_key} -> {key} : \nshape {val.shape} and {value.shape}, do not match' )
lowercase =original_key
lowercase =original_key
lowercase =value
return new_dict
@torch.no_grad()
def UpperCamelCase ( lowercase_ : Any=None , lowercase_ : List[Any]=None ) -> str:
'''simple docstring'''
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(f'{pytorch_dump_folder_path}/{file.split("/" )[-1]}' ):
lowercase =requests.get(f'{PREFIX}{file}' , allow_redirects=lowercase_ )
os.makedirs(f'{pytorch_dump_folder_path}/' , exist_ok=lowercase_ )
open(f'{pytorch_dump_folder_path}/{file.split("/" )[-1]}' , '''wb''' ).write(r.content )
lowercase =MODEL_MAPPING[model_name.split('''/''' )[-1]]
lowercase =JukeboxConfig.from_pretrained(lowercase_ )
lowercase =JukeboxModel(lowercase_ )
lowercase =[]
lowercase ={}
for i, dict_name in enumerate(lowercase_ ):
lowercase =torch.load(f'{pytorch_dump_folder_path}/{dict_name.split("/" )[-1]}' )['''model''']
lowercase ={}
for k in old_dic.keys():
if k.endswith('''.b''' ):
lowercase =old_dic[k]
elif k.endswith('''.w''' ):
lowercase =old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
lowercase =old_dic[k]
else:
lowercase =old_dic[k]
lowercase ='''vqvae''' if i == 0 else f'priors.{3 - i}'
lowercase =fix_jukebox_keys(lowercase_ , model.state_dict() , lowercase_ , lowercase_ )
weight_dict.append(lowercase_ )
lowercase =weight_dict.pop(0 )
model.vqvae.load_state_dict(lowercase_ )
for i in range(len(lowercase_ ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(lowercase_ ).mkdir(exist_ok=lowercase_ )
with open(f'{pytorch_dump_folder_path}/mapping.json' , '''w''' ) as txtfile:
json.dump(lowercase_ , lowercase_ )
print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(lowercase_ )
return weight_dict
if __name__ == "__main__":
_UpperCAmelCase : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''jukebox-5b-lyrics''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''jukebox-5b-lyrics-converted''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
_UpperCAmelCase : Optional[Any] = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 72 |
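As a side note on the key renaming above: the downsample-block index is derived from the two positional digits in the original checkpoint key. A minimal standalone sketch of that arithmetic (the example key is made up; the pattern matches the one bound to re_encoder_block_conv_in above):

import re

pattern = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
key = "encoders.0.level_blocks.1.model.2.1.bias"
groups = pattern.fullmatch(key).groups()           # ('0', '1', '2', '1', 'bias')
block_index = int(groups[2]) * 2 + int(groups[3])  # 2 * 2 + 1 = 5
print(f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}")
# -> encoders.0.level_blocks.1.downsample_block.5.bias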
'''simple docstring'''
def UpperCamelCase ( ) -> int:
'''simple docstring'''
return 1
def UpperCamelCase ( lowercase_ : int ) -> int:
'''simple docstring'''
return 0 if x < 0 else two_pence(x - 2 ) + one_pence()
def UpperCamelCase ( lowercase_ : int ) -> int:
'''simple docstring'''
return 0 if x < 0 else five_pence(x - 5 ) + two_pence(lowercase_ )
def UpperCamelCase ( lowercase_ : int ) -> int:
'''simple docstring'''
return 0 if x < 0 else ten_pence(x - 1_0 ) + five_pence(lowercase_ )
def UpperCamelCase ( lowercase_ : int ) -> int:
'''simple docstring'''
return 0 if x < 0 else twenty_pence(x - 2_0 ) + ten_pence(lowercase_ )
def UpperCamelCase ( lowercase_ : int ) -> int:
'''simple docstring'''
return 0 if x < 0 else fifty_pence(x - 5_0 ) + twenty_pence(lowercase_ )
def UpperCamelCase ( lowercase_ : int ) -> int:
'''simple docstring'''
return 0 if x < 0 else one_pound(x - 1_0_0 ) + fifty_pence(lowercase_ )
def UpperCamelCase ( lowercase_ : int ) -> int:
'''simple docstring'''
return 0 if x < 0 else two_pound(x - 2_0_0 ) + one_pound(lowercase_ )
def UpperCamelCase ( lowercase_ : int = 2_0_0 ) -> int:
'''simple docstring'''
return two_pound(lowercase_ )
if __name__ == "__main__":
print(solution(int(input().strip())))
| 72 | 1 |
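The recursive pence counters above recompute the same subproblems exponentially often. A bottom-up equivalent (function name hypothetical) that confirms the count for the default 200p target:

def count_ways(target: int, coins=(1, 2, 5, 10, 20, 50, 100, 200)) -> int:
    # ways[v] = number of coin combinations that sum to v pence
    ways = [1] + [0] * target
    for coin in coins:
        for v in range(coin, target + 1):
            ways[v] += ways[v - coin]
    return ways[target]

assert count_ways(5, coins=(1, 2, 5)) == 4  # 5 / 2+2+1 / 2+1+1+1 / 1+1+1+1+1
print(count_ways(200))  # 73682, the value solution(200) should also return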
'''simple docstring'''
_UpperCAmelCase : Tuple = '''ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'''
def UpperCamelCase ( lowercase_ : bytes ) -> bytes:
'''simple docstring'''
if not isinstance(lowercase_ , lowercase_ ):
lowercase =f'a bytes-like object is required, not \'{data.__class__.__name__}\''
raise TypeError(lowercase_ )
lowercase =''''''.join(bin(lowercase_ )[2:].zfill(8 ) for byte in data )
lowercase =len(lowercase_ ) % 6 != 0
if padding_needed:
# The padding that will be added later
lowercase =b'''=''' * ((6 - len(lowercase_ ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(lowercase_ ) % 6)
else:
lowercase =b''''''
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(lowercase_ ) , 6 ) ).encode()
+ padding
)
def UpperCamelCase ( lowercase_ : str ) -> bytes:
'''simple docstring'''
if not isinstance(lowercase_ , lowercase_ ) and not isinstance(lowercase_ , lowercase_ ):
lowercase =(
'''argument should be a bytes-like object or ASCII string, '''
f'not \'{encoded_data.__class__.__name__}\''
)
raise TypeError(lowercase_ )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(lowercase_ , lowercase_ ):
try:
lowercase =encoded_data.decode('''utf-8''' )
except UnicodeDecodeError:
raise ValueError('''base64 encoded data should only contain ASCII characters''' )
lowercase =encoded_data.count('''=''' )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(lowercase_ ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
lowercase =encoded_data[:-padding]
lowercase =''''''.join(
bin(B64_CHARSET.index(lowercase_ ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
lowercase =''''''.join(
bin(B64_CHARSET.index(lowercase_ ) )[2:].zfill(6 ) for char in encoded_data )
lowercase =[
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(lowercase_ ) , 8 )
]
return bytes(lowercase_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 72 |
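A round-trip check of the encode/decode pair above against the standard library. The names base64_encode and base64_decode are assumed bindings for the two functions (their defs are name-mangled in this row):

import base64  # stdlib reference implementation, for comparison only

data = b"Hello, World!"
encoded = base64_encode(data)             # hypothetical name for the encoder above
assert encoded == base64.b64encode(data)  # b'SGVsbG8sIFdvcmxkIQ=='
assert base64_decode(encoded) == data     # hypothetical name for the decoder above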
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class __magic_name__ ( __SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = ['image_processor', 'tokenizer']
UpperCamelCase__ = 'BlipImageProcessor'
UpperCamelCase__ = 'AutoTokenizer'
def __init__( self , snake_case_ , snake_case_ , snake_case_ ):
super().__init__(snake_case_ , snake_case_ )
# add QFormer tokenizer
lowercase =qformer_tokenizer
def __call__( self , snake_case_ = None , snake_case_ = None , snake_case_ = True , snake_case_ = False , snake_case_ = None , snake_case_ = None , snake_case_ = 0 , snake_case_ = None , snake_case_ = None , snake_case_ = False , snake_case_ = False , snake_case_ = False , snake_case_ = False , snake_case_ = False , snake_case_ = True , snake_case_ = None , **snake_case_ , ):
if images is None and text is None:
raise ValueError('''You have to specify at least images or text.''' )
lowercase =BatchFeature()
if text is not None:
lowercase =self.tokenizer(
text=snake_case_ , add_special_tokens=snake_case_ , padding=snake_case_ , truncation=snake_case_ , max_length=snake_case_ , stride=snake_case_ , pad_to_multiple_of=snake_case_ , return_attention_mask=snake_case_ , return_overflowing_tokens=snake_case_ , return_special_tokens_mask=snake_case_ , return_offsets_mapping=snake_case_ , return_token_type_ids=snake_case_ , return_length=snake_case_ , verbose=snake_case_ , return_tensors=snake_case_ , **snake_case_ , )
encoding.update(snake_case_ )
lowercase =self.qformer_tokenizer(
text=snake_case_ , add_special_tokens=snake_case_ , padding=snake_case_ , truncation=snake_case_ , max_length=snake_case_ , stride=snake_case_ , pad_to_multiple_of=snake_case_ , return_attention_mask=snake_case_ , return_overflowing_tokens=snake_case_ , return_special_tokens_mask=snake_case_ , return_offsets_mapping=snake_case_ , return_token_type_ids=snake_case_ , return_length=snake_case_ , verbose=snake_case_ , return_tensors=snake_case_ , **snake_case_ , )
lowercase =qformer_text_encoding.pop('''input_ids''' )
lowercase =qformer_text_encoding.pop('''attention_mask''' )
if images is not None:
lowercase =self.image_processor(snake_case_ , return_tensors=snake_case_ )
encoding.update(snake_case_ )
return encoding
def _A( self , *snake_case_ , **snake_case_ ):
return self.tokenizer.batch_decode(*snake_case_ , **snake_case_ )
def _A( self , *snake_case_ , **snake_case_ ):
return self.tokenizer.decode(*snake_case_ , **snake_case_ )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def _A( self ):
lowercase =self.tokenizer.model_input_names
lowercase =self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
def _A( self , snake_case_ , **snake_case_ ):
if os.path.isfile(snake_case_ ):
raise ValueError(f'Provided path ({save_directory}) should be a directory, not a file' )
os.makedirs(snake_case_ , exist_ok=snake_case_ )
lowercase =os.path.join(snake_case_ , '''qformer_tokenizer''' )
self.qformer_tokenizer.save_pretrained(snake_case_ )
return super().save_pretrained(snake_case_ , **snake_case_ )
@classmethod
def _A( cls , snake_case_ , **snake_case_ ):
lowercase =AutoTokenizer.from_pretrained(snake_case_ , subfolder='''qformer_tokenizer''' )
lowercase =cls._get_arguments_from_pretrained(snake_case_ , **snake_case_ )
args.append(snake_case_ )
return cls(*snake_case_ )
| 72 | 1 |
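The model_input_names property above merges tokenizer and image-processor keys with dict.fromkeys, which deduplicates while keeping first-seen order. A tiny illustration with made-up name lists:

tokenizer_input_names = ["input_ids", "attention_mask"]
image_processor_input_names = ["pixel_values", "attention_mask"]

merged = list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
print(merged)  # ['input_ids', 'attention_mask', 'pixel_values']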
'''simple docstring'''
import copy
import re
class __magic_name__ :
UpperCamelCase__ = 'hp'
UpperCamelCase__ = {}
UpperCamelCase__ = None
@classmethod
def _A( cls , snake_case_ , snake_case_ ):
lowercase =prefix
lowercase =defaults
cls.build_naming_info()
@staticmethod
def _A( snake_case_ , snake_case_ ):
if len(snake_case_ ) == 0:
return ""
lowercase =None
if any(char.isdigit() for char in word ):
raise Exception(f'Parameters should not contain numbers: \'{word}\' contains a number' )
if word in info["short_word"]:
return info["short_word"][word]
for prefix_len in range(1 , len(snake_case_ ) + 1 ):
lowercase =word[:prefix_len]
if prefix in info["reverse_short_word"]:
continue
else:
lowercase =prefix
break
if short_word is None:
# Paranoid fallback
def int_to_alphabetic(snake_case_ ):
lowercase =''''''
while integer != 0:
lowercase =chr(ord('''A''' ) + integer % 10 ) + s
integer //= 10
return s
lowercase =0
while True:
lowercase =word + '''#''' + int_to_alphabetic(snake_case_ )
if sword in info["reverse_short_word"]:
continue
else:
lowercase =sword
break
lowercase =short_word
lowercase =word
return short_word
@staticmethod
def _A( snake_case_ , snake_case_ ):
lowercase =param_name.split('''_''' )
lowercase =[TrialShortNamer.shortname_for_word(snake_case_ , snake_case_ ) for word in words]
# We try to create a separatorless short name, but if there is a collision we have to fallback
# to a separated short name
lowercase =['''''', '''_''']
for separator in separators:
lowercase =separator.join(snake_case_ )
if shortname not in info["reverse_short_param"]:
lowercase =shortname
lowercase =param_name
return shortname
return param_name
@staticmethod
def _A( snake_case_ , snake_case_ ):
lowercase =TrialShortNamer.shortname_for_key(snake_case_ , snake_case_ )
lowercase =short_name
lowercase =param_name
@classmethod
def _A( cls ):
if cls.NAMING_INFO is not None:
return
lowercase ={
'''short_word''': {},
'''reverse_short_word''': {},
'''short_param''': {},
'''reverse_short_param''': {},
}
lowercase =list(cls.DEFAULTS.keys() )
for k in field_keys:
cls.add_new_param_name(snake_case_ , snake_case_ )
lowercase =info
@classmethod
def _A( cls , snake_case_ ):
cls.build_naming_info()
assert cls.PREFIX is not None
lowercase =[copy.copy(cls.PREFIX )]
for k, v in params.items():
if k not in cls.DEFAULTS:
raise Exception(f'You should provide a default value for the param name {k} with value {v}' )
if v == cls.DEFAULTS[k]:
# The default value is not added to the name
continue
lowercase =cls.NAMING_INFO['''short_param'''][k]
if isinstance(snake_case_ , snake_case_ ):
lowercase =1 if v else 0
lowercase ='''''' if isinstance(snake_case_ , (int, float) ) else '''-'''
lowercase =f'{key}{sep}{v}'
name.append(snake_case_ )
return "_".join(snake_case_ )
@classmethod
def _A( cls , snake_case_ ):
lowercase =repr[len(cls.PREFIX ) + 1 :]
if repr == "":
lowercase =[]
else:
lowercase =repr.split('''_''' )
lowercase ={}
for value in values:
if "-" in value:
lowercase , lowercase =value.split('''-''' )
else:
lowercase =re.sub('''[0-9.]''' , '''''' , snake_case_ )
lowercase =float(re.sub('''[^0-9.]''' , '''''' , snake_case_ ) )
lowercase =cls.NAMING_INFO['''reverse_short_param'''][p_k]
lowercase =p_v
for k in cls.DEFAULTS:
if k not in parameters:
lowercase =cls.DEFAULTS[k]
return parameters
| 72 |
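Assuming the mangled methods above correspond to build_naming_info/shortname/parse_repr as in the upstream TrialShortNamer, usage looks roughly like this (the subclass and its values are hypothetical):

class RunNamer(TrialShortNamer):  # parent class name assumed; the def above is mangled
    PREFIX = "run"
    DEFAULTS = {"learning_rate": 1e-3, "dropout": 0.1}

# Param names shrink to unique prefixes ("lr", "d"); values at their default are omitted.
print(RunNamer.shortname({"learning_rate": 3e-4, "dropout": 0.1}))
# -> "run_lr0.0003"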
'''simple docstring'''
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_UpperCAmelCase : Dict = '''\
@inproceedings{lin-2004-rouge,
title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",
author = "Lin, Chin-Yew",
booktitle = "Text Summarization Branches Out",
month = jul,
year = "2004",
address = "Barcelona, Spain",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W04-1013",
pages = "74--81",
}
'''
_UpperCAmelCase : Union[str, Any] = '''\
ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for
evaluating automatic summarization and machine translation software in natural language processing.
The metrics compare an automatically produced summary or translation against one or more human-produced reference summaries or translations.
Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.
This metric is a wrapper around the Google Research reimplementation of ROUGE:
https://github.com/google-research/google-research/tree/master/rouge
'''
_UpperCAmelCase : Dict = '''
Calculates average rouge scores for a list of hypotheses and references
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
rouge_types: A list of rouge types to calculate.
Valid names:
`"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,
`"rougeL"`: Longest common subsequence based scoring.
`"rougeLSum"`: rougeLsum splits text using `"\n"`.
See details in https://github.com/huggingface/datasets/issues/617
use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.
use_aggregator: Return aggregates if this is set to True
Returns:
rouge1: rouge_1 (precision, recall, f1),
rouge2: rouge_2 (precision, recall, f1),
rougeL: rouge_l (precision, recall, f1),
rougeLsum: rouge_lsum (precision, recall, f1)
Examples:
>>> rouge = datasets.load_metric(\'rouge\')
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> results = rouge.compute(predictions=predictions, references=references)
>>> print(list(results.keys()))
[\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']
>>> print(results["rouge1"])
AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))
>>> print(results["rouge1"].mid.fmeasure)
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __magic_name__ ( datasets.Metric ):
def _A( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/google-research/google-research/tree/master/rouge'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/ROUGE_(metric)''',
'''https://github.com/google-research/google-research/tree/master/rouge''',
] , )
def _A( self , snake_case_ , snake_case_ , snake_case_=None , snake_case_=True , snake_case_=False ):
if rouge_types is None:
lowercase =['''rouge1''', '''rouge2''', '''rougeL''', '''rougeLsum''']
lowercase =rouge_scorer.RougeScorer(rouge_types=snake_case_ , use_stemmer=snake_case_ )
if use_aggregator:
lowercase =scoring.BootstrapAggregator()
else:
lowercase =[]
for ref, pred in zip(snake_case_ , snake_case_ ):
lowercase =scorer.score(snake_case_ , snake_case_ )
if use_aggregator:
aggregator.add_scores(snake_case_ )
else:
scores.append(snake_case_ )
if use_aggregator:
lowercase =aggregator.aggregate()
else:
lowercase ={}
for key in scores[0]:
lowercase =[score[key] for score in scores]
return result
| 72 | 1 |
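The docstring above demonstrates the aggregated path; with use_aggregator=False the compute method above instead returns one Score tuple per prediction/reference pair. A short sketch (requires the rouge_score package):

import datasets

rouge = datasets.load_metric("rouge")
results = rouge.compute(
    predictions=["hello there"], references=["hello there"], use_aggregator=False
)
print(results["rouge1"][0].fmeasure)  # 1.0 for an exact match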
'''simple docstring'''
from ....utils import logging
_UpperCAmelCase : List[str] = logging.get_logger(__name__)
class __magic_name__ ( __SCREAMING_SNAKE_CASE ):
def __init__( self , snake_case_ , snake_case_=None , snake_case_=20_48 ):
lowercase =config.__dict__
lowercase =modal_hidden_size
if num_labels:
lowercase =num_labels
| 72 |
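A usage sketch for the wrapper above; ModalConfig is a hypothetical name for the mangled class:

from transformers import BertConfig

cfg = ModalConfig(BertConfig(), num_labels=5)  # hypothetical class name
print(cfg.num_labels)         # 5
print(cfg.modal_hidden_size)  # 2048, the default

Because the wrapper takes over the backbone config's __dict__, setting fields on it also mutates the wrapped config object.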
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_UpperCAmelCase : str = '''▁'''
_UpperCAmelCase : Union[str, Any] = {'''vocab_file''': '''spiece.model'''}
_UpperCAmelCase : Union[str, Any] = {
'''vocab_file''': {'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'''}
}
_UpperCAmelCase : List[Any] = {
'''google/pegasus-xsum''': 5_12,
}
_UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
class __magic_name__ ( __SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = VOCAB_FILES_NAMES
UpperCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ = ['input_ids', 'attention_mask']
def __init__( self , snake_case_ , snake_case_="<pad>" , snake_case_="</s>" , snake_case_="<unk>" , snake_case_="<mask_2>" , snake_case_="<mask_1>" , snake_case_=None , snake_case_=1_03 , snake_case_ = None , **snake_case_ , ):
lowercase =offset
if additional_special_tokens is not None:
if not isinstance(snake_case_ , snake_case_ ):
raise TypeError(
f'additional_special_tokens should be of type {type(snake_case_ )}, but is'
f' {type(snake_case_ )}' )
lowercase =(
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f'<unk_{i}>' for i in range(len(snake_case_ ) , self.offset - 1 )
]
if len(set(snake_case_ ) ) != len(snake_case_ ):
raise ValueError(
'''Please make sure that the provided additional_special_tokens do not contain an incorrectly'''
f' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.' )
lowercase =additional_special_tokens_extended
else:
lowercase =[mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f'<unk_{i}>' for i in range(2 , self.offset )]
lowercase ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=snake_case_ , unk_token=snake_case_ , mask_token=snake_case_ , pad_token=snake_case_ , mask_token_sent=snake_case_ , offset=snake_case_ , additional_special_tokens=snake_case_ , sp_model_kwargs=self.sp_model_kwargs , **snake_case_ , )
lowercase =mask_token_sent
lowercase =vocab_file
lowercase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(snake_case_ )
# add special tokens to encoder dict
lowercase ={
0: self.pad_token,
1: self.eos_token,
}
if self.mask_token_sent is not None:
self.encoder.update(
{
2: self.mask_token_sent,
3: self.mask_token,
} )
if self.offset > 0:
# entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
# mask_token_sent is already added to list -> so start at 1
self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} )
lowercase ={v: k for k, v in self.encoder.items()}
@property
def _A( self ):
return len(self.sp_model ) + self.offset
def _A( self ):
lowercase ={self.convert_ids_to_tokens(snake_case_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
lowercase =self.__dict__.copy()
lowercase =None
return state
def __setstate__( self , snake_case_ ):
lowercase =d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowercase ={}
lowercase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _A( self , snake_case_ ):
return self.sp_model.encode(snake_case_ , out_type=snake_case_ )
def _A( self , snake_case_ ):
if token in self.decoder:
return self.decoder[token]
elif token in self.added_tokens_decoder:
return self.added_tokens_decoder[token]
lowercase =self.sp_model.piece_to_id(snake_case_ )
return sp_id + self.offset
def _A( self , snake_case_ ):
if index in self.encoder:
return self.encoder[index]
elif index in self.added_tokens_encoder:
return self.added_tokens_encoder[index]
else:
lowercase =self.sp_model.IdToPiece(index - self.offset )
return token
def _A( self , snake_case_ ):
lowercase =[]
lowercase =''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(snake_case_ ) + token
lowercase =[]
else:
current_sub_tokens.append(snake_case_ )
out_string += self.sp_model.decode(snake_case_ )
return out_string.strip()
def _A( self , snake_case_=False ):
return 1
def _A( self , snake_case_ ):
lowercase =set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
return [1 if x in all_special_ids else 0 for x in seq]
def _A( self , snake_case_ , snake_case_ = None , snake_case_ = False ):
if already_has_special_tokens:
return self._special_token_mask(snake_case_ )
elif token_ids_a is None:
return self._special_token_mask(snake_case_ ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def _A( self , snake_case_ , snake_case_=None ):
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def _A( self , snake_case_ , snake_case_ = None ):
if not os.path.isdir(snake_case_ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
lowercase =os.path.join(
snake_case_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case_ )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case_ , '''wb''' ) as fi:
lowercase =self.sp_model.serialized_model_proto()
fi.write(snake_case_ )
return (out_vocab_file,)
| 72 | 1 |
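For a concrete feel of the offset bookkeeping above, loading the published checkpoint (network download required) shows pad/eos at ids 0/1 and the eos id appended by the sequence-building method:

from transformers import PegasusTokenizer

tok = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
ids = tok("PEGASUS pre-trains with gap sentences.").input_ids
assert ids[-1] == tok.eos_token_id == 1  # eos appended, per the logic above
print(tok.decode(ids, skip_special_tokens=True))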
'''simple docstring'''
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__)
_UpperCAmelCase : List[Any] = {
'''nielsr/canine-s''': 20_48,
}
# Unicode defines 1,114,112 total “codepoints”
_UpperCAmelCase : str = 1_11_41_12
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
_UpperCAmelCase : List[str] = 0
_UpperCAmelCase : Optional[Any] = 0xE000
_UpperCAmelCase : str = 0xE001
_UpperCAmelCase : Union[str, Any] = 0xE002
_UpperCAmelCase : Optional[int] = 0xE003
_UpperCAmelCase : Dict = 0xE004
# Maps special codepoints to human-readable names.
_UpperCAmelCase : Dict[int, str] = {
# Special symbols are represented using codepoints values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: "[CLS]",
SEP: "[SEP]",
BOS: "[BOS]",
MASK: "[MASK]",
PAD: "[PAD]",
RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
_UpperCAmelCase : Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class __magic_name__ ( __SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self , snake_case_=chr(CLS ) , snake_case_=chr(SEP ) , snake_case_=chr(SEP ) , snake_case_=chr(CLS ) , snake_case_=chr(PAD ) , snake_case_=chr(MASK ) , snake_case_=False , snake_case_=20_48 , **snake_case_ , ):
lowercase =AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else bos_token
lowercase =AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else eos_token
lowercase =AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else sep_token
lowercase =AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else cls_token
lowercase =AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
lowercase =AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else mask_token
super().__init__(
bos_token=snake_case_ , eos_token=snake_case_ , sep_token=snake_case_ , cls_token=snake_case_ , pad_token=snake_case_ , mask_token=snake_case_ , add_prefix_space=snake_case_ , model_max_length=snake_case_ , **snake_case_ , )
# Creates a mapping for looking up the IDs of special symbols.
lowercase ={}
for codepoint, name in SPECIAL_CODEPOINTS.items():
lowercase =codepoint
# Creates a mapping for looking up the string forms of special symbol IDs.
lowercase ={
codepoint: name for name, codepoint in self._special_codepoints.items()
}
lowercase =UNICODE_VOCAB_SIZE
lowercase =len(self._special_codepoints )
@property
def _A( self ):
return self._unicode_vocab_size
def _A( self , snake_case_ ):
return list(snake_case_ )
def _A( self , snake_case_ ):
try:
return ord(snake_case_ )
except TypeError:
raise ValueError(f'invalid token: \'{token}\'' )
def _A( self , snake_case_ ):
try:
if index in SPECIAL_CODEPOINTS:
return SPECIAL_CODEPOINTS[index]
return chr(snake_case_ )
except TypeError:
raise ValueError(f'invalid id: {index}' )
def _A( self , snake_case_ ):
return "".join(snake_case_ )
def _A( self , snake_case_ , snake_case_ = None ):
lowercase =[self.sep_token_id]
lowercase =[self.cls_token_id]
lowercase =cls + token_ids_a + sep
if token_ids_a is not None:
result += token_ids_a + sep
return result
def _A( self , snake_case_ , snake_case_ = None , snake_case_ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case_ , token_ids_a=snake_case_ , already_has_special_tokens=snake_case_ )
lowercase =[1] + ([0] * len(snake_case_ )) + [1]
if token_ids_a is not None:
result += ([0] * len(snake_case_ )) + [1]
return result
def _A( self , snake_case_ , snake_case_ = None ):
lowercase =[self.sep_token_id]
lowercase =[self.cls_token_id]
lowercase =len(cls + token_ids_a + sep ) * [0]
if token_ids_a is not None:
result += len(token_ids_a + sep ) * [1]
return result
def _A( self , snake_case_ , snake_case_ = None ):
return ()
| 72 |
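The tokenizer above needs no vocabulary file: tokens are single characters and ids are their Unicode codepoints. A standalone check mirroring the mangled convert methods:

text = "héllo"
ids = [ord(ch) for ch in text]               # what the token-to-id method does
assert ids[1] == 0xE9                        # 'é' is U+00E9
assert "".join(chr(i) for i in ids) == text  # what the id-to-token method undoes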
'''simple docstring'''
def UpperCamelCase ( lowercase_ : int , lowercase_ : int ) -> str:
'''simple docstring'''
return "\n".join(
f'{number} * {i} = {number * i}' for i in range(1 , number_of_terms + 1 ) )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10))
| 72 | 1 |
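Using the function name from the __main__ guard above, the expected output for a small table:

print(multiplication_table(number=3, number_of_terms=4))
# 3 * 1 = 3
# 3 * 2 = 6
# 3 * 3 = 9
# 3 * 4 = 12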
'''simple docstring'''
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
_UpperCAmelCase : Dict = version.parse(importlib_metadata.version('''nltk'''))
if NLTK_VERSION >= version.Version('''3.6.4'''):
from nltk import word_tokenize
_UpperCAmelCase : Dict = '''\
@inproceedings{banarjee2005,
title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},
author = {Banerjee, Satanjeev and Lavie, Alon},
booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},
month = jun,
year = {2005},
address = {Ann Arbor, Michigan},
publisher = {Association for Computational Linguistics},
url = {https://www.aclweb.org/anthology/W05-0909},
pages = {65--72},
}
'''
_UpperCAmelCase : Union[str, Any] = '''\
METEOR, an automatic metric for machine translation evaluation
that is based on a generalized concept of unigram matching between the
machine-produced translation and human-produced reference translations.
Unigrams can be matched based on their surface forms, stemmed forms,
and meanings; furthermore, METEOR can be easily extended to include more
advanced matching strategies. Once all generalized unigram matches
between the two strings have been found, METEOR computes a score for
this matching using a combination of unigram-precision, unigram-recall, and
a measure of fragmentation that is designed to directly capture how
well-ordered the matched words in the machine translation are in relation
to the reference.
METEOR gets an R correlation value of 0.347 with human evaluation on the Arabic
data and 0.331 on the Chinese data. This is shown to be an improvement on
using simply unigram-precision, unigram-recall and their harmonic F1
combination.
'''
_UpperCAmelCase : Tuple = '''
Computes METEOR score of translated segments against one or more references.
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
alpha: Parameter for controlling relative weights of precision and recall. default: 0.9
beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3
gamma: Relative weight assigned to fragmentation penalty. default: 0.5
Returns:
\'meteor\': meteor score.
Examples:
>>> meteor = datasets.load_metric(\'meteor\')
>>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]
>>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]
>>> results = meteor.compute(predictions=predictions, references=references)
>>> print(round(results["meteor"], 4))
0.6944
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __magic_name__ ( datasets.Metric ):
def _A( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'''] , reference_urls=[
'''https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score''',
'''https://en.wikipedia.org/wiki/METEOR''',
] , )
def _A( self , snake_case_ ):
import nltk
nltk.download('''wordnet''' )
if NLTK_VERSION >= version.Version('''3.6.5''' ):
nltk.download('''punkt''' )
if NLTK_VERSION >= version.Version('''3.6.6''' ):
nltk.download('''omw-1.4''' )
def _A( self , snake_case_ , snake_case_ , snake_case_=0.9 , snake_case_=3 , snake_case_=0.5 ):
if NLTK_VERSION >= version.Version('''3.6.5''' ):
lowercase =[
meteor_score.single_meteor_score(
word_tokenize(snake_case_ ) , word_tokenize(snake_case_ ) , alpha=snake_case_ , beta=snake_case_ , gamma=snake_case_ )
for ref, pred in zip(snake_case_ , snake_case_ )
]
else:
lowercase =[
meteor_score.single_meteor_score(snake_case_ , snake_case_ , alpha=snake_case_ , beta=snake_case_ , gamma=snake_case_ )
for ref, pred in zip(snake_case_ , snake_case_ )
]
return {"meteor": np.mean(snake_case_ )}
| 72 |
'''simple docstring'''
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def UpperCamelCase ( lowercase_ : List[str] , lowercase_ : Union[str, Any] , lowercase_ : Dict , lowercase_ : Tuple ) -> List[Any]:
'''simple docstring'''
if isinstance(lowercase_ , lowercase_ ):
lowercase =np.full((len(lowercase_ ), sequence_length, 2) , lowercase_ )
else:
lowercase =np.full((len(lowercase_ ), sequence_length) , lowercase_ )
for i, tensor in enumerate(lowercase_ ):
if padding_side == "right":
if isinstance(lowercase_ , lowercase_ ):
lowercase =tensor[:sequence_length]
else:
lowercase =tensor[:sequence_length]
else:
if isinstance(lowercase_ , lowercase_ ):
lowercase =tensor[:sequence_length]
else:
lowercase =tensor[:sequence_length]
return out_tensor.tolist()
def UpperCamelCase ( lowercase_ : Optional[Any] ) -> str:
'''simple docstring'''
lowercase =ord(lowercase_ )
if (cp >= 3_3 and cp <= 4_7) or (cp >= 5_8 and cp <= 6_4) or (cp >= 9_1 and cp <= 9_6) or (cp >= 1_2_3 and cp <= 1_2_6):
return True
lowercase =unicodedata.category(lowercase_ )
if cat.startswith('''P''' ):
return True
return False
@dataclass
class __magic_name__ ( __SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = 42
UpperCamelCase__ = True
UpperCamelCase__ = None
UpperCamelCase__ = None
UpperCamelCase__ = -1_00
UpperCamelCase__ = "pt"
def _A( self , snake_case_ ):
import torch
lowercase ='''label''' if '''label''' in features[0].keys() else '''labels'''
lowercase =[feature[label_name] for feature in features] if label_name in features[0].keys() else None
lowercase =self.tokenizer.pad(
snake_case_ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' if labels is None else None , )
if labels is None:
return batch
lowercase =torch.tensor(batch['''entity_ids'''] ).shape[1]
lowercase =self.tokenizer.padding_side
if padding_side == "right":
lowercase =[
list(snake_case_ ) + [self.label_pad_token_id] * (sequence_length - len(snake_case_ )) for label in labels
]
else:
lowercase =[
[self.label_pad_token_id] * (sequence_length - len(snake_case_ )) + list(snake_case_ ) for label in labels
]
lowercase =[feature['''ner_tags'''] for feature in features]
lowercase =padding_tensor(snake_case_ , -1 , snake_case_ , snake_case_ )
lowercase =[feature['''original_entity_spans'''] for feature in features]
lowercase =padding_tensor(snake_case_ , (-1, -1) , snake_case_ , snake_case_ )
lowercase ={k: torch.tensor(snake_case_ , dtype=torch.intaa ) for k, v in batch.items()}
return batch
| 72 | 1 |
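The assignments inside padding_tensor above lost their index targets to name mangling; under the upstream semantics it mirrors (function name taken from the call sites), it pads ragged lists to a fixed length:

print(padding_tensor([[1, 2], [3]], -1, "right", 4))
# -> [[1, 2, -1, -1], [3, -1, -1, -1]]

spans = [[(0, 2), (3, 7)], [(1, 4)]]
print(padding_tensor(spans, (-1, -1), "right", 3))  # tuple pad adds a trailing dim of 2
# -> [[[0, 2], [3, 7], [-1, -1]], [[1, 4], [-1, -1], [-1, -1]]]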
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_UpperCAmelCase : str = {
'''configuration_resnet''': ['''RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ResNetConfig''', '''ResNetOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Tuple = [
'''RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ResNetForImageClassification''',
'''ResNetModel''',
'''ResNetPreTrainedModel''',
'''ResNetBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Optional[Any] = [
'''TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFResNetForImageClassification''',
'''TFResNetModel''',
'''TFResNetPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Union[str, Any] = [
'''FlaxResNetForImageClassification''',
'''FlaxResNetModel''',
'''FlaxResNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
_UpperCAmelCase : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 72 |
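Assuming this row is the resnet subpackage __init__ inside transformers, the lazy structure makes the package import cheap; the torch-backed module is only materialized on first attribute access:

import sys

from transformers.models import resnet  # cheap: only the import structure is registered
print("transformers.models.resnet.modeling_resnet" in sys.modules)  # False, if nothing imported it yet
_ = resnet.ResNetModel                   # first attribute access runs the real import
print("transformers.models.resnet.modeling_resnet" in sys.modules)  # True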
'''simple docstring'''
_UpperCAmelCase : Tuple = '''ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'''
def UpperCamelCase ( lowercase_ : bytes ) -> bytes:
'''simple docstring'''
if not isinstance(lowercase_ , lowercase_ ):
lowercase =f'a bytes-like object is required, not \'{data.__class__.__name__}\''
raise TypeError(lowercase_ )
lowercase =''''''.join(bin(lowercase_ )[2:].zfill(8 ) for byte in data )
lowercase =len(lowercase_ ) % 6 != 0
if padding_needed:
# The padding that will be added later
lowercase =b'''=''' * ((6 - len(lowercase_ ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(lowercase_ ) % 6)
else:
lowercase =b''''''
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(lowercase_ ) , 6 ) ).encode()
+ padding
)
def UpperCamelCase ( lowercase_ : str ) -> bytes:
'''simple docstring'''
if not isinstance(lowercase_ , lowercase_ ) and not isinstance(lowercase_ , lowercase_ ):
lowercase =(
'''argument should be a bytes-like object or ASCII string, '''
f'not \'{encoded_data.__class__.__name__}\''
)
raise TypeError(lowercase_ )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(lowercase_ , lowercase_ ):
try:
lowercase =encoded_data.decode('''utf-8''' )
except UnicodeDecodeError:
raise ValueError('''base64 encoded data should only contain ASCII characters''' )
lowercase =encoded_data.count('''=''' )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(lowercase_ ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
lowercase =encoded_data[:-padding]
lowercase =''''''.join(
bin(B64_CHARSET.index(lowercase_ ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
lowercase =''''''.join(
bin(B64_CHARSET.index(lowercase_ ) )[2:].zfill(6 ) for char in encoded_data )
lowercase =[
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(lowercase_ ) , 8 )
]
return bytes(lowercase_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 72 | 1 |
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
_UpperCAmelCase : str = logging.getLogger(__name__)
@dataclass
class __magic_name__ ( __SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = field(
default=0.0 , metadata={'help': 'The label smoothing epsilon to apply (if not zero).'} )
UpperCamelCase__ = field(default=__SCREAMING_SNAKE_CASE , metadata={'help': 'Whether to SortishSamler or not.'} )
UpperCamelCase__ = field(
default=__SCREAMING_SNAKE_CASE , metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'} )
UpperCamelCase__ = field(default=__SCREAMING_SNAKE_CASE , metadata={'help': 'whether to use adafactor'} )
UpperCamelCase__ = field(
default=__SCREAMING_SNAKE_CASE , metadata={'help': 'Encoder layer dropout probability. Goes into model.config.'} )
UpperCamelCase__ = field(
default=__SCREAMING_SNAKE_CASE , metadata={'help': 'Decoder layer dropout probability. Goes into model.config.'} )
UpperCamelCase__ = field(default=__SCREAMING_SNAKE_CASE , metadata={'help': 'Dropout probability. Goes into model.config.'} )
UpperCamelCase__ = field(
default=__SCREAMING_SNAKE_CASE , metadata={'help': 'Attention dropout probability. Goes into model.config.'} )
UpperCamelCase__ = field(
default='linear' , metadata={'help': f"""Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}"""} , )
| 72 |
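A hedged sketch of how such a dataclass is typically consumed: HfArgumentParser turns its fields into CLI flags. The class and field names below are assumed from the metadata strings above (the defs are mangled):

from transformers import HfArgumentParser

parser = HfArgumentParser(Seq2SeqTrainingArguments)  # hypothetical de-mangled class name
(training_args,) = parser.parse_args_into_dataclasses(
    args=["--output_dir", "out", "--sortish_sampler", "--label_smoothing", "0.1"]
)
print(training_args.label_smoothing)  # 0.1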
'''simple docstring'''
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
_UpperCAmelCase : Union[str, Any] = datasets.logging.get_logger(__name__)
_UpperCAmelCase : Optional[Any] = '''\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",
author = "Moosavi, Nafise Sadat and
Strube, Michael",
booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = aug,
year = "2016",
address = "Berlin, Germany",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/P16-1060",
doi = "10.18653/v1/P16-1060",
pages = "632--642",
}
'''
_UpperCAmelCase : str = '''\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only works with the CoNLL line format:
The CoNLL format has one word per line with all the annotation for this word in column separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterisk with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11 Named Entities These columns identify the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More information on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
'''
_UpperCAmelCase : Optional[int] = '''
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
        Only columns 4, 5, 6 and the last column are used (word, POS, parse and coreference annotation)
See the details on the format in the description of the metric.
references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
        Only columns 4, 5, 6 and the last column are used (word, POS, parse and coreference annotation)
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
mentions whose corresponding coreference chain is of size one,
are considered as singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting \'keep_singletons=False\', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
\'mentions\': mentions
\'muc\': MUC metric [Vilain et al, 1995]
\'bcub\': B-cubed [Bagga and Baldwin, 1998]
\'ceafe\': CEAFe [Luo et al., 2005]
\'lea\': LEA [Moosavi and Strube, 2016]
\'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric(\'coval\')
>>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',
... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',
... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',
... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',
... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',
... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}
'''
def UpperCamelCase ( lowercase_ : Optional[Any] , lowercase_ : List[Any] , lowercase_ : Optional[Any]=False , lowercase_ : Optional[Any]=False , lowercase_ : Optional[Any]=True , lowercase_ : Optional[Any]=False , lowercase_ : int="dummy_doc" ) -> str:
'''simple docstring'''
lowercase ={doc: key_lines}
lowercase ={doc: sys_lines}
lowercase ={}
lowercase =0
lowercase =0
lowercase =0
lowercase =0
lowercase =0
lowercase =0
lowercase , lowercase =reader.get_doc_mentions(lowercase_ , key_doc_lines[doc] , lowercase_ )
key_singletons_num += singletons_num
if NP_only or min_span:
lowercase =reader.set_annotated_parse_trees(lowercase_ , key_doc_lines[doc] , lowercase_ , lowercase_ )
lowercase , lowercase =reader.get_doc_mentions(lowercase_ , sys_doc_lines[doc] , lowercase_ )
sys_singletons_num += singletons_num
if NP_only or min_span:
lowercase =reader.set_annotated_parse_trees(lowercase_ , key_doc_lines[doc] , lowercase_ , lowercase_ )
if remove_nested:
lowercase , lowercase =reader.remove_nested_coref_mentions(lowercase_ , lowercase_ )
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
lowercase , lowercase =reader.remove_nested_coref_mentions(lowercase_ , lowercase_ )
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
lowercase =reader.get_mention_assignments(lowercase_ , lowercase_ )
lowercase =reader.get_mention_assignments(lowercase_ , lowercase_ )
lowercase =(key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
'''Number of removed nested coreferring mentions in the key '''
f'annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}' )
logger.info(
'''Number of resulting singleton clusters in the key '''
f'annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}' )
if not keep_singletons:
logger.info(
f'{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system '
'''files, respectively''' )
return doc_coref_infos
def UpperCamelCase ( lowercase_ : List[Any] , lowercase_ : Union[str, Any] , lowercase_ : List[str] , lowercase_ : Optional[Any] , lowercase_ : List[str] , lowercase_ : Any , lowercase_ : Tuple ) -> Dict:
'''simple docstring'''
lowercase =get_coref_infos(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
lowercase ={}
lowercase =0
lowercase =0
for name, metric in metrics:
lowercase , lowercase , lowercase =evaluator.evaluate_documents(lowercase_ , lowercase_ , beta=1 )
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({f'{name}/recall': recall, f'{name}/precision': precision, f'{name}/f1': fa} )
logger.info(
name.ljust(1_0 ) , f'Recall: {recall * 1_0_0:.2f}' , f' Precision: {precision * 1_0_0:.2f}' , f' F1: {fa * 1_0_0:.2f}' , )
if conll_subparts_num == 3:
lowercase =(conll / 3) * 1_0_0
logger.info(f'CoNLL score: {conll:.2f}' )
output_scores.update({'''conll_score''': conll} )
return output_scores
def UpperCamelCase ( lowercase_ : Any ) -> List[Any]:
'''simple docstring'''
lowercase =False
for line in key_lines:
if not line.startswith('''#''' ):
if len(line.split() ) > 6:
lowercase =line.split()[5]
if not parse_col == "-":
lowercase =True
break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __magic_name__ ( datasets.Metric ):
def _A( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' ) ),
'''references''': datasets.Sequence(datasets.Value('''string''' ) ),
} ) , codebase_urls=['''https://github.com/ns-moosavi/coval'''] , reference_urls=[
'''https://github.com/ns-moosavi/coval''',
'''https://www.aclweb.org/anthology/P16-1060''',
'''http://www.conll.cemantix.org/2012/data.html''',
] , )
def _A( self , snake_case_ , snake_case_ , snake_case_=True , snake_case_=False , snake_case_=False , snake_case_=False ):
lowercase =[
('''mentions''', evaluator.mentions),
('''muc''', evaluator.muc),
('''bcub''', evaluator.b_cubed),
('''ceafe''', evaluator.ceafe),
('''lea''', evaluator.lea),
]
if min_span:
lowercase =util.check_gold_parse_annotation(snake_case_ )
if not has_gold_parse:
raise NotImplementedError('''References should have gold parse annotation to use \'min_span\'.''' )
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
lowercase =evaluate(
key_lines=snake_case_ , sys_lines=snake_case_ , metrics=snake_case_ , NP_only=snake_case_ , remove_nested=snake_case_ , keep_singletons=snake_case_ , min_span=snake_case_ , )
return score
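# A minimal, self-contained sketch (not part of the metric itself) of how the
# conll_score reported above is aggregated: it is the plain average of the
# MUC, B-cubed and CEAFe F1 values, scaled to a percentage.
# muc_f1, bcub_f1, ceafe_f1 = 0.80, 0.70, 0.60  # hypothetical subscores
# conll_score = (muc_f1 + bcub_f1 + ceafe_f1) / 3 * 100  # -> 70.0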
| 72 | 1 |
'''simple docstring'''
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def UpperCamelCase ( lowercase_ : List[Any] , lowercase_ : Tuple , lowercase_ : int , lowercase_ : Optional[Any] ) -> List[str]:
'''simple docstring'''
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), f'Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), f'Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'
def UpperCamelCase ( lowercase_ : int , lowercase_ : Any , lowercase_ : Union[str, Any] , lowercase_ : Optional[int] , lowercase_ : Union[str, Any]=True ) -> Optional[Any]:
'''simple docstring'''
model.train()
lowercase =model(lowercase_ )
lowercase =F.mse_loss(lowercase_ , target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(lowercase_ )
def UpperCamelCase ( lowercase_ : str , lowercase_ : Dict=False ) -> int:
'''simple docstring'''
set_seed(4_2 )
lowercase =RegressionModel()
lowercase =deepcopy(lowercase_ )
lowercase =RegressionDataset(length=8_0 )
lowercase =DataLoader(lowercase_ , batch_size=1_6 )
model.to(accelerator.device )
if sched:
lowercase =AdamW(params=model.parameters() , lr=1E-3 )
lowercase =AdamW(params=ddp_model.parameters() , lr=1E-3 )
lowercase =LambdaLR(lowercase_ , lr_lambda=lambda lowercase_ : epoch**0.6_5 )
lowercase =LambdaLR(lowercase_ , lr_lambda=lambda lowercase_ : epoch**0.6_5 )
# Make a copy of `model`
if sched:
lowercase , lowercase , lowercase , lowercase =accelerator.prepare(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
else:
lowercase , lowercase =accelerator.prepare(lowercase_ , lowercase_ )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def UpperCamelCase ( lowercase_ : int ) -> List[str]:
'''simple docstring'''
lowercase , lowercase , lowercase =get_training_setup(lowercase_ )
# Use a single batch
lowercase , lowercase =next(iter(lowercase_ ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
lowercase , lowercase =accelerator.gather((ddp_input, ddp_target) )
lowercase , lowercase =input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(lowercase_ ):
step_model(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
else:
# Sync grads
step_model(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), f'Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
# Shuffle ddp_input on each iteration
torch.manual_seed(1_3_3_7 + iteration )
lowercase =ddp_input[torch.randperm(len(lowercase_ ) )]
def UpperCamelCase ( lowercase_ : Any ) -> Optional[Any]:
'''simple docstring'''
lowercase , lowercase , lowercase =get_training_setup(lowercase_ )
# Use a single batch
lowercase , lowercase =next(iter(lowercase_ ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
lowercase , lowercase =accelerator.gather((ddp_input, ddp_target) )
lowercase , lowercase =input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(lowercase_ ):
step_model(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
else:
# Sync grads
step_model(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), f'Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), f'Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
# Shuffle ddp_input on each iteration
torch.manual_seed(1_3_3_7 + iteration )
lowercase =ddp_input[torch.randperm(len(lowercase_ ) )]
def UpperCamelCase ( lowercase_ : Dict=False , lowercase_ : Tuple=False ) -> str:
'''simple docstring'''
lowercase =Accelerator(
split_batches=lowercase_ , dispatch_batches=lowercase_ , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
lowercase , lowercase , lowercase =get_training_setup(lowercase_ )
for iteration, batch in enumerate(lowercase_ ):
lowercase , lowercase =batch.values()
# Gather the distributed inputs and targs for the base model
lowercase , lowercase =accelerator.gather((ddp_input, ddp_target) )
lowercase , lowercase =input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(lowercase_ ):
step_model(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
# Grads should be in sync only on every second step or on the final batch of the dataloader
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(lowercase_ ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), f'Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), f'Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'
# Shuffle ddp_input on each iteration
torch.manual_seed(1_3_3_7 + iteration )
lowercase =ddp_input[torch.randperm(len(lowercase_ ) )]
GradientState._reset_state()
def UpperCamelCase ( lowercase_ : List[str]=False , lowercase_ : str=False ) -> Union[str, Any]:
'''simple docstring'''
lowercase =Accelerator(
split_batches=lowercase_ , dispatch_batches=lowercase_ , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase =get_training_setup(lowercase_ , lowercase_ )
for iteration, batch in enumerate(lowercase_ ):
lowercase , lowercase =batch.values()
# Gather the distributed inputs and targs for the base model
lowercase , lowercase =accelerator.gather((ddp_input, ddp_target) )
lowercase , lowercase =input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(lowercase_ )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(lowercase_ ):
step_model(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), f'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'
lowercase =(((iteration + 1) % 2) == 0) or ((iteration + 1) == len(lowercase_ ))
if accelerator.num_processes > 1:
check_model_parameters(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
# Shuffle ddp_input on each iteration
torch.manual_seed(1_3_3_7 + iteration )
GradientState._reset_state()
def UpperCamelCase ( ) -> Dict:
'''simple docstring'''
lowercase =Accelerator()
lowercase =RegressionDataset(length=8_0 )
lowercase =DataLoader(lowercase_ , batch_size=1_6 )
lowercase =RegressionDataset(length=9_6 )
lowercase =DataLoader(lowercase_ , batch_size=1_6 )
lowercase , lowercase =accelerator.prepare(lowercase_ , lowercase_ )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(lowercase_ ):
assert id(accelerator.gradient_state.active_dataloader ) == id(lowercase_ )
if iteration < len(lowercase_ ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(lowercase_ ):
assert id(accelerator.gradient_state.active_dataloader ) == id(lowercase_ )
if batch_num < len(lowercase_ ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def UpperCamelCase ( ) -> Optional[Any]:
'''simple docstring'''
lowercase =Accelerator()
lowercase =accelerator.state
if state.local_process_index == 0:
print('''**Test `accumulate` gradient accumulation with dataloader break**''' )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print('''**Test NOOP `no_sync` context manager**''' )
test_noop_sync(lowercase_ )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print('''**Test Distributed `no_sync` context manager**''' )
test_distributed_sync(lowercase_ )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
'''**Test `accumulate` gradient accumulation, ''' , f'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**' , )
test_gradient_accumulation(lowercase_ , lowercase_ )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version('''<''' , '''2.0''' ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
'''**Test `accumulate` gradient accumulation with optimizer and scheduler, ''' , '''`split_batches=False`, `dispatch_batches=False`**''' , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
'''**Test `accumulate` gradient accumulation with optimizer and scheduler, ''' , f'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**' , )
test_gradient_accumulation_with_opt_and_scheduler(lowercase_ , lowercase_ )
def UpperCamelCase ( lowercase_ : Dict ) -> Optional[int]:
'''simple docstring'''
main()
if __name__ == "__main__":
main()
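# A minimal sketch of the accumulate pattern these tests exercise, assuming
# only the public accelerate API imported above (model, optimizer and
# dataloader are hypothetical, already-constructed objects). Gradients are
# synchronized only on every `gradient_accumulation_steps`-th step or on the
# last batch:
#
# accelerator = Accelerator(gradient_accumulation_steps=2)
# model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
# for batch, target in dataloader:
#     with accelerator.accumulate(model):
#         loss = F.mse_loss(model(batch), target)
#         accelerator.backward(loss)
#         optimizer.step()
#         optimizer.zero_grad()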
| 72 |
'''simple docstring'''
def UpperCamelCase ( lowercase_ : int ) -> int:
'''simple docstring'''
if n == 1 or not isinstance(lowercase_ , lowercase_ ):
return 0
elif n == 2:
return 1
else:
lowercase =[0, 1]
for i in range(2 , n + 1 ):
sequence.append(sequence[i - 1] + sequence[i - 2] )
return sequence[n]
def UpperCamelCase ( lowercase_ : int ) -> int:
'''simple docstring'''
lowercase =0
lowercase =2
while digits < n:
index += 1
lowercase =len(str(fibonacci(lowercase_ ) ) )
return index
def UpperCamelCase ( lowercase_ : int = 1_0_0_0 ) -> int:
'''simple docstring'''
return fibonacci_digits_index(lowercase_ )
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
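# A self-contained sketch of the same search that keeps only two running
# Fibonacci values instead of materializing the whole sequence (the helper
# name is illustrative; indexing matches the code above, where F(12) = 144 is
# the first three-digit term):
# def first_fib_index_with_n_digits(n: int) -> int:
#     a, b, index = 1, 1, 2
#     while len(str(b)) < n:
#         a, b = b, a + b
#         index += 1
#     return index
# first_fib_index_with_n_digits(3)  # -> 12, since F(12) = 144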
| 72 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
_UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
_UpperCAmelCase : Dict = '''
Examples:
```py
>>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline
>>> import torch
>>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/Kandinsky-2-1-prior")
>>> pipe_prior.to("cuda")
>>> prompt = "red cat, 4k photo"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> negative_image_emb = out.negative_image_embeds
>>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")
>>> pipe.to("cuda")
>>> image = pipe(
... prompt,
... image_embeds=image_emb,
... negative_image_embeds=negative_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... ).images
>>> image[0].save("cat.png")
```
'''
def UpperCamelCase ( lowercase_ : Dict , lowercase_ : Tuple , lowercase_ : int=8 ) -> Union[str, Any]:
'''simple docstring'''
lowercase =h // scale_factor**2
if h % scale_factor**2 != 0:
new_h += 1
lowercase =w // scale_factor**2
if w % scale_factor**2 != 0:
new_w += 1
return new_h * scale_factor, new_w * scale_factor
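# Worked example of the rounding above: with the default scale_factor=8 the
# divisor is 8**2 = 64, so get_new_h_w(768, 768) computes 768 // 64 = 12 with
# no remainder and returns (12 * 8, 12 * 8) = (96, 96), the latent grid size.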
class __magic_name__ ( __SCREAMING_SNAKE_CASE ):
def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ):
super().__init__()
self.register_modules(
text_encoder=snake_case_ , tokenizer=snake_case_ , unet=snake_case_ , scheduler=snake_case_ , movq=snake_case_ , )
lowercase =2 ** (len(self.movq.config.block_out_channels ) - 1)
def _A( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
if latents is None:
lowercase =randn_tensor(snake_case_ , generator=snake_case_ , device=snake_case_ , dtype=snake_case_ )
else:
if latents.shape != shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}' )
lowercase =latents.to(snake_case_ )
lowercase =latents * scheduler.init_noise_sigma
return latents
def _A( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_=None , ):
lowercase =len(snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else 1
# get prompt text embeddings
lowercase =self.tokenizer(
snake_case_ , padding='''max_length''' , truncation=snake_case_ , max_length=77 , return_attention_mask=snake_case_ , add_special_tokens=snake_case_ , return_tensors='''pt''' , )
lowercase =text_inputs.input_ids
lowercase =self.tokenizer(snake_case_ , padding='''longest''' , return_tensors='''pt''' ).input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(snake_case_ , snake_case_ ):
lowercase =self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] )
logger.warning(
'''The following part of your input was truncated because CLIP can only handle sequences up to'''
f' {self.tokenizer.model_max_length} tokens: {removed_text}' )
lowercase =text_input_ids.to(snake_case_ )
lowercase =text_inputs.attention_mask.to(snake_case_ )
lowercase , lowercase =self.text_encoder(
input_ids=snake_case_ , attention_mask=snake_case_ )
lowercase =prompt_embeds.repeat_interleave(snake_case_ , dim=0 )
lowercase =text_encoder_hidden_states.repeat_interleave(snake_case_ , dim=0 )
lowercase =text_mask.repeat_interleave(snake_case_ , dim=0 )
if do_classifier_free_guidance:
lowercase =42
if negative_prompt is None:
lowercase =[''''''] * batch_size
elif type(snake_case_ ) is not type(snake_case_ ):
raise TypeError(
f'`negative_prompt` should be the same type to `prompt`, but got {type(snake_case_ )} !='
f' {type(snake_case_ )}.' )
elif isinstance(snake_case_ , snake_case_ ):
lowercase =[negative_prompt]
elif batch_size != len(snake_case_ ):
raise ValueError(
f'`negative_prompt`: {negative_prompt} has batch size {len(snake_case_ )}, but `prompt`:'
f' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'
''' the batch size of `prompt`.''' )
else:
lowercase =negative_prompt
lowercase =self.tokenizer(
snake_case_ , padding='''max_length''' , max_length=77 , truncation=snake_case_ , return_attention_mask=snake_case_ , add_special_tokens=snake_case_ , return_tensors='''pt''' , )
lowercase =uncond_input.input_ids.to(snake_case_ )
lowercase =uncond_input.attention_mask.to(snake_case_ )
lowercase , lowercase =self.text_encoder(
input_ids=snake_case_ , attention_mask=snake_case_ )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowercase =negative_prompt_embeds.shape[1]
lowercase =negative_prompt_embeds.repeat(1 , snake_case_ )
lowercase =negative_prompt_embeds.view(batch_size * num_images_per_prompt , snake_case_ )
lowercase =uncond_text_encoder_hidden_states.shape[1]
lowercase =uncond_text_encoder_hidden_states.repeat(1 , snake_case_ , 1 )
lowercase =uncond_text_encoder_hidden_states.view(
batch_size * num_images_per_prompt , snake_case_ , -1 )
lowercase =uncond_text_mask.repeat_interleave(snake_case_ , dim=0 )
# done duplicates
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowercase =torch.cat([negative_prompt_embeds, prompt_embeds] )
lowercase =torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] )
lowercase =torch.cat([uncond_text_mask, text_mask] )
return prompt_embeds, text_encoder_hidden_states, text_mask
def _A( self , snake_case_=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
lowercase =torch.device(f'cuda:{gpu_id}' )
lowercase =[
self.unet,
self.text_encoder,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(snake_case_ , snake_case_ )
def _A( self , snake_case_=0 ):
if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' )
lowercase =torch.device(f'cuda:{gpu_id}' )
if self.device.type != "cpu":
self.to('''cpu''' , silence_dtype_warnings=snake_case_ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
lowercase =None
for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
lowercase , lowercase =cpu_offload_with_hook(snake_case_ , snake_case_ , prev_module_hook=snake_case_ )
if self.safety_checker is not None:
lowercase , lowercase =cpu_offload_with_hook(self.safety_checker , snake_case_ , prev_module_hook=snake_case_ )
# We'll offload the last model manually.
lowercase =hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _A( self ):
if not hasattr(self.unet , '''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
hasattr(snake_case_ , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(snake_case_ )
def __call__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ = None , snake_case_ = 5_12 , snake_case_ = 5_12 , snake_case_ = 1_00 , snake_case_ = 4.0 , snake_case_ = 1 , snake_case_ = None , snake_case_ = None , snake_case_ = "pil" , snake_case_ = True , ):
if isinstance(snake_case_ , snake_case_ ):
lowercase =1
elif isinstance(snake_case_ , snake_case_ ):
lowercase =len(snake_case_ )
else:
raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(snake_case_ )}' )
lowercase =self._execution_device
lowercase =batch_size * num_images_per_prompt
lowercase =guidance_scale > 1.0
lowercase , lowercase , lowercase =self._encode_prompt(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
if isinstance(snake_case_ , snake_case_ ):
lowercase =torch.cat(snake_case_ , dim=0 )
if isinstance(snake_case_ , snake_case_ ):
lowercase =torch.cat(snake_case_ , dim=0 )
if do_classifier_free_guidance:
lowercase =image_embeds.repeat_interleave(snake_case_ , dim=0 )
lowercase =negative_image_embeds.repeat_interleave(snake_case_ , dim=0 )
lowercase =torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(
dtype=prompt_embeds.dtype , device=snake_case_ )
self.scheduler.set_timesteps(snake_case_ , device=snake_case_ )
lowercase =self.scheduler.timesteps
lowercase =self.unet.config.in_channels
lowercase , lowercase =get_new_h_w(snake_case_ , snake_case_ , self.movq_scale_factor )
# create initial latent
lowercase =self.prepare_latents(
(batch_size, num_channels_latents, height, width) , text_encoder_hidden_states.dtype , snake_case_ , snake_case_ , snake_case_ , self.scheduler , )
for i, t in enumerate(self.progress_bar(snake_case_ ) ):
# expand the latents if we are doing classifier free guidance
lowercase =torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase ={'''text_embeds''': prompt_embeds, '''image_embeds''': image_embeds}
lowercase =self.unet(
sample=snake_case_ , timestep=snake_case_ , encoder_hidden_states=snake_case_ , added_cond_kwargs=snake_case_ , return_dict=snake_case_ , )[0]
if do_classifier_free_guidance:
lowercase , lowercase =noise_pred.split(latents.shape[1] , dim=1 )
lowercase , lowercase =noise_pred.chunk(2 )
lowercase , lowercase =variance_pred.chunk(2 )
lowercase =noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
lowercase =torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , '''variance_type''' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
lowercase , lowercase =noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
lowercase =self.scheduler.step(
snake_case_ , snake_case_ , snake_case_ , generator=snake_case_ , ).prev_sample
# post-processing
lowercase =self.movq.decode(snake_case_ , force_not_quantize=snake_case_ )['''sample''']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f'Only the output types `pt`, `pil` and `np` are supported, not output_type={output_type}' )
if output_type in ["np", "pil"]:
lowercase =image * 0.5 + 0.5
lowercase =image.clamp(0 , 1 )
lowercase =image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowercase =self.numpy_to_pil(snake_case_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=snake_case_ )
| 72 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
_UpperCAmelCase : str = logging.get_logger(__name__)
_UpperCAmelCase : Any = {
'''Helsinki-NLP/opus-mt-en-de''': '''https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json''',
# See all Marian models at https://huggingface.co/models?filter=marian
}
class __magic_name__ ( __SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = 'marian'
UpperCamelCase__ = ['past_key_values']
UpperCamelCase__ = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self , snake_case_=5_81_01 , snake_case_=None , snake_case_=10_24 , snake_case_=12 , snake_case_=40_96 , snake_case_=16 , snake_case_=12 , snake_case_=40_96 , snake_case_=16 , snake_case_=0.0 , snake_case_=0.0 , snake_case_=True , snake_case_=True , snake_case_="gelu" , snake_case_=10_24 , snake_case_=0.1 , snake_case_=0.0 , snake_case_=0.0 , snake_case_=0.02 , snake_case_=5_81_00 , snake_case_=False , snake_case_=5_81_00 , snake_case_=0 , snake_case_=0 , snake_case_=True , **snake_case_ , ):
lowercase =vocab_size
lowercase =decoder_vocab_size or vocab_size
lowercase =max_position_embeddings
lowercase =d_model
lowercase =encoder_ffn_dim
lowercase =encoder_layers
lowercase =encoder_attention_heads
lowercase =decoder_ffn_dim
lowercase =decoder_layers
lowercase =decoder_attention_heads
lowercase =dropout
lowercase =attention_dropout
lowercase =activation_dropout
lowercase =activation_function
lowercase =init_std
lowercase =encoder_layerdrop
lowercase =decoder_layerdrop
lowercase =use_cache
lowercase =encoder_layers
lowercase =scale_embedding # scale factor will be sqrt(d_model) if True
lowercase =share_encoder_decoder_embeddings
super().__init__(
pad_token_id=snake_case_ , eos_token_id=snake_case_ , is_encoder_decoder=snake_case_ , decoder_start_token_id=snake_case_ , forced_eos_token_id=snake_case_ , **snake_case_ , )
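# Hypothetical usage sketch (assumes the upstream `transformers.MarianConfig`
# that this mangled class mirrors; names here are illustrative only):
# from transformers import MarianConfig
# config = MarianConfig(d_model=1024, encoder_layers=12, decoder_layers=12)
# config.hidden_size == config.d_model  # True, via the attribute map above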
class __magic_name__ ( __SCREAMING_SNAKE_CASE ):
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
def _A( self ):
if self.task in ["default", "seq2seq-lm"]:
lowercase =OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
lowercase ={0: '''batch'''}
lowercase ={0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
lowercase ={0: '''batch''', 1: '''decoder_sequence'''}
lowercase ={0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(snake_case_ , direction='''inputs''' )
elif self.task == "causal-lm":
# TODO: figure this case out.
lowercase =OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
lowercase , lowercase =self.num_layers
for i in range(snake_case_ ):
lowercase ={0: '''batch''', 2: '''past_sequence + sequence'''}
lowercase ={0: '''batch''', 2: '''past_sequence + sequence'''}
else:
lowercase =OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
] )
return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
def _A( self ):
if self.task in ["default", "seq2seq-lm"]:
lowercase =super().outputs
else:
lowercase =super(snake_case_ , self ).outputs
if self.use_past:
lowercase , lowercase =self.num_layers
for i in range(snake_case_ ):
lowercase ={0: '''batch''', 2: '''past_sequence + sequence'''}
lowercase ={0: '''batch''', 2: '''past_sequence + sequence'''}
return common_outputs
def _A( self , snake_case_ , snake_case_ = -1 , snake_case_ = -1 , snake_case_ = False , snake_case_ = None , ):
lowercase =self._generate_dummy_inputs_for_encoder_and_decoder(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# Generate decoder inputs
lowercase =seq_length if not self.use_past else 1
lowercase =self._generate_dummy_inputs_for_encoder_and_decoder(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
lowercase ={f'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
lowercase =dict(**snake_case_ , **snake_case_ )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
lowercase , lowercase =common_inputs['''input_ids'''].shape
lowercase =common_inputs['''decoder_input_ids'''].shape[1]
lowercase , lowercase =self.num_attention_heads
lowercase =(
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowercase =decoder_seq_length + 3
lowercase =(
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
lowercase =torch.cat(
[common_inputs['''decoder_attention_mask'''], torch.ones(snake_case_ , snake_case_ )] , dim=1 )
lowercase =[]
# If the number of encoder and decoder layers are present in the model configuration, both are considered
lowercase , lowercase =self.num_layers
lowercase =min(snake_case_ , snake_case_ )
lowercase =max(snake_case_ , snake_case_ ) - min_num_layers
lowercase ='''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
for _ in range(snake_case_ ):
common_inputs["past_key_values"].append(
(
torch.zeros(snake_case_ ),
torch.zeros(snake_case_ ),
torch.zeros(snake_case_ ),
torch.zeros(snake_case_ ),
) )
# TODO: test this.
lowercase =encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
for _ in range(snake_case_ , snake_case_ ):
common_inputs["past_key_values"].append((torch.zeros(snake_case_ ), torch.zeros(snake_case_ )) )
return common_inputs
def _A( self , snake_case_ , snake_case_ = -1 , snake_case_ = -1 , snake_case_ = False , snake_case_ = None , ):
lowercase =self._generate_dummy_inputs_for_encoder_and_decoder(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
lowercase , lowercase =common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
lowercase =seqlen + 2
lowercase , lowercase =self.num_layers
lowercase , lowercase =self.num_attention_heads
lowercase =(
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowercase =common_inputs['''attention_mask'''].dtype
lowercase =torch.cat(
[common_inputs['''attention_mask'''], torch.ones(snake_case_ , snake_case_ , dtype=snake_case_ )] , dim=1 )
lowercase =[
(torch.zeros(snake_case_ ), torch.zeros(snake_case_ )) for _ in range(snake_case_ )
]
return common_inputs
def _A( self , snake_case_ , snake_case_ = -1 , snake_case_ = -1 , snake_case_ = False , snake_case_ = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowercase =compute_effective_axis_dimension(
snake_case_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
lowercase =tokenizer.num_special_tokens_to_add(snake_case_ )
lowercase =compute_effective_axis_dimension(
snake_case_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=snake_case_ )
# Generate dummy inputs according to compute batch and sequence
lowercase =[''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
lowercase =dict(tokenizer(snake_case_ , return_tensors=snake_case_ ) )
return common_inputs
def _A( self , snake_case_ , snake_case_ = -1 , snake_case_ = -1 , snake_case_ = False , snake_case_ = None , ):
if self.task in ["default", "seq2seq-lm"]:
lowercase =self._generate_dummy_inputs_for_default_and_seqaseq_lm(
snake_case_ , batch_size=snake_case_ , seq_length=snake_case_ , is_pair=snake_case_ , framework=snake_case_ )
else:
lowercase =self._generate_dummy_inputs_for_causal_lm(
snake_case_ , batch_size=snake_case_ , seq_length=snake_case_ , is_pair=snake_case_ , framework=snake_case_ )
return common_inputs
def _A( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
if self.task in ["default", "seq2seq-lm"]:
lowercase =super()._flatten_past_key_values_(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
else:
lowercase =super(snake_case_ , self )._flatten_past_key_values_(
snake_case_ , snake_case_ , snake_case_ , snake_case_ )
@property
def _A( self ):
return 1E-4
| 72 | 1 |
'''simple docstring'''
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'''files''' , [
['''full:README.md''', '''dataset_infos.json'''],
['''empty:README.md''', '''dataset_infos.json'''],
['''dataset_infos.json'''],
['''full:README.md'''],
] , )
def UpperCamelCase ( lowercase_ : Any , lowercase_ : int ) -> int:
'''simple docstring'''
lowercase =tmp_path_factory.mktemp('''dset_infos_dir''' )
if "full:README.md" in files:
with open(dataset_infos_dir / '''README.md''' , '''w''' ) as f:
f.write('''---\ndataset_info:\n dataset_size: 42\n---''' )
if "empty:README.md" in files:
with open(dataset_infos_dir / '''README.md''' , '''w''' ) as f:
f.write('''''' )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / '''dataset_infos.json''' , '''w''' ) as f:
f.write('''{"default": {"dataset_size": 42}}''' )
lowercase =DatasetInfosDict.from_directory(lowercase_ )
assert dataset_infos
assert dataset_infos["default"].dataset_size == 4_2
@pytest.mark.parametrize(
'''dataset_info''' , [
DatasetInfo(),
DatasetInfo(
description='''foo''' , features=Features({'''a''': Value('''int32''' )} ) , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train'''}] , download_size=4_2 , ),
] , )
def UpperCamelCase ( lowercase_ : Tuple , lowercase_ : DatasetInfo ) -> List[str]:
'''simple docstring'''
lowercase =str(lowercase_ )
dataset_info.write_to_directory(lowercase_ )
lowercase =DatasetInfo.from_directory(lowercase_ )
assert dataset_info == reloaded
assert os.path.exists(os.path.join(lowercase_ , '''dataset_info.json''' ) )
def UpperCamelCase ( ) -> str:
'''simple docstring'''
lowercase =DatasetInfo(
description='''foo''' , citation='''bar''' , homepage='''https://foo.bar''' , license='''CC0''' , features=Features({'''a''': Value('''int32''' )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train''', '''num_examples''': 4_2}] , download_checksums={} , download_size=1_3_3_7 , post_processing_size=4_4_2 , dataset_size=1_2_3_4 , size_in_bytes=1_3_3_7 + 4_4_2 + 1_2_3_4 , )
lowercase =dataset_info._to_yaml_dict()
assert sorted(lowercase_ ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
assert key in dataset_info_yaml_dict
assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
lowercase =yaml.safe_dump(lowercase_ )
lowercase =yaml.safe_load(lowercase_ )
assert dataset_info_yaml_dict == reloaded
def UpperCamelCase ( ) -> List[str]:
'''simple docstring'''
lowercase =DatasetInfo()
lowercase =dataset_info._to_yaml_dict()
assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'''dataset_infos_dict''' , [
DatasetInfosDict(),
DatasetInfosDict({'''default''': DatasetInfo()} ),
DatasetInfosDict({'''my_config_name''': DatasetInfo()} ),
DatasetInfosDict(
{
'''default''': DatasetInfo(
description='''foo''' , features=Features({'''a''': Value('''int32''' )} ) , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train'''}] , download_size=4_2 , )
} ),
DatasetInfosDict(
{
'''v1''': DatasetInfo(dataset_size=4_2 ),
'''v2''': DatasetInfo(dataset_size=1_3_3_7 ),
} ),
] , )
def UpperCamelCase ( lowercase_ : List[Any] , lowercase_ : DatasetInfosDict ) -> Optional[int]:
'''simple docstring'''
lowercase =str(lowercase_ )
dataset_infos_dict.write_to_directory(lowercase_ )
lowercase =DatasetInfosDict.from_directory(lowercase_ )
# the config_name keys of the dataset_infos_dict take precedence over each DatasetInfo's own config_name attribute
for config_name, dataset_info in dataset_infos_dict.items():
lowercase =config_name
# the yaml representation doesn't include fields like description or citation
# so we just test that we can recover what we can from the yaml
lowercase =DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
assert dataset_infos_dict == reloaded
if dataset_infos_dict:
assert os.path.exists(os.path.join(lowercase_ , '''README.md''' ) )
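# A minimal round-trip sketch of the API exercised by these tests (the target
# directory is a hypothetical writable path):
# info = DatasetInfo(description="foo", dataset_size=42)
# info.write_to_directory("/tmp/dset_info")  # writes dataset_info.json
# assert DatasetInfo.from_directory("/tmp/dset_info") == info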
| 72 |
'''simple docstring'''
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
_UpperCAmelCase : Dict = parse(importlib.metadata.version('''torch'''))
def UpperCamelCase ( lowercase_ : Union[str, Version] , lowercase_ : str , lowercase_ : str ) -> List[Any]:
'''simple docstring'''
if operation not in STR_OPERATION_TO_FUNC.keys():
raise ValueError(f'`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}' )
lowercase =STR_OPERATION_TO_FUNC[operation]
if isinstance(lowercase_ , lowercase_ ):
lowercase =parse(importlib.metadata.version(lowercase_ ) )
return operation(lowercase_ , parse(lowercase_ ) )
def UpperCamelCase ( lowercase_ : str , lowercase_ : str ) -> Union[str, Any]:
'''simple docstring'''
return compare_versions(lowercase_ , lowercase_ , lowercase_ )
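# Usage sketch (names restored for clarity: the first helper above is
# `compare_versions`, the second is `is_torch_version`, which forwards the
# cached torch version as the first argument):
# compare_versions(parse("2.1.0"), ">=", "2.0")  # -> True
# is_torch_version("<", "2.0")  # -> True only on a torch 1.x install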
| 72 | 1 |
'''simple docstring'''
def UpperCamelCase ( lowercase_ : float , lowercase_ : float , lowercase_ : int ) -> float:
'''simple docstring'''
if principal <= 0:
raise Exception('''Principal borrowed must be > 0''' )
if rate_per_annum < 0:
raise Exception('''Rate of interest must be >= 0''' )
if years_to_repay <= 0 or not isinstance(lowercase_ , lowercase_ ):
raise Exception('''Years to repay must be an integer > 0''' )
# Yearly rate is divided by 12 to get monthly rate
lowercase =rate_per_annum / 1_2
# Years to repay is multiplied by 12 to get number of payments as payment is monthly
lowercase =years_to_repay * 1_2
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
if __name__ == "__main__":
import doctest
doctest.testmod()
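# Worked example of the formula above: borrowing 100000 at 10% per annum over
# 2 years gives r = 0.10 / 12 per month and n = 24 payments, so the monthly
# installment is 100000 * r * (1 + r)**24 / ((1 + r)**24 - 1), roughly 4614.49.
# Note that the guard admits rate_per_annum == 0, for which the denominator
# collapses to zero and the division fails.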
| 72 |
'''simple docstring'''
from __future__ import annotations
import time
import numpy as np
_UpperCAmelCase : int = [8, 5, 9, 7]
_UpperCAmelCase : List[str] = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
_UpperCAmelCase : Union[str, Any] = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class __magic_name__ :
def __init__( self , snake_case_ , snake_case_ , snake_case_ , ):
lowercase =claim_vector
lowercase =allocated_resources_table
lowercase =maximum_claim_table
def _A( self ):
return [
sum(p_item[i] for p_item in self.__allocated_resources_table )
for i in range(len(self.__allocated_resources_table[0] ) )
]
def _A( self ):
return np.array(self.__claim_vector ) - np.array(
self.__processes_resource_summation() )
def _A( self ):
return [
list(np.array(self.__maximum_claim_table[i] ) - np.array(snake_case_ ) )
for i, allocated_resource in enumerate(self.__allocated_resources_table )
]
def _A( self ):
return {self.__need().index(snake_case_ ): i for i in self.__need()}
def _A( self , **snake_case_ ):
lowercase =self.__need()
lowercase =self.__allocated_resources_table
lowercase =self.__available_resources()
lowercase =self.__need_index_manager()
for kw, val in kwargs.items():
if kw and val is True:
self.__pretty_data()
print('''_''' * 50 + '''\n''' )
while need_list:
lowercase =False
for each_need in need_list:
lowercase =True
for index, need in enumerate(snake_case_ ):
if need > available_resources[index]:
lowercase =False
break
if execution:
lowercase =True
# get the original index of the process from ind_ctrl db
for original_need_index, need_clone in need_index_manager.items():
if each_need == need_clone:
lowercase =original_need_index
print(f'Process {process_number + 1} is executing.' )
# remove the process run from stack
need_list.remove(snake_case_ )
# update available/freed resources stack
lowercase =np.array(snake_case_ ) + np.array(
alloc_resources_table[process_number] )
print(
'''Updated available resource stack for processes: '''
+ ''' '''.join([str(snake_case_ ) for x in available_resources] ) )
break
if safe:
print('''The process is in a safe state.\n''' )
else:
print('''System in unsafe state. Aborting...\n''' )
break
def _A( self ):
print(''' ''' * 9 + '''Allocated Resource Table''' )
for item in self.__allocated_resources_table:
print(
f'P{self.__allocated_resources_table.index(snake_case_ ) + 1}'
+ ''' '''.join(f'{it:>8}' for it in item )
+ '''\n''' )
print(''' ''' * 9 + '''System Resource Table''' )
for item in self.__maximum_claim_table:
print(
f'P{self.__maximum_claim_table.index(snake_case_ ) + 1}'
+ ''' '''.join(f'{it:>8}' for it in item )
+ '''\n''' )
print(
'''Current Usage by Active Processes: '''
+ ''' '''.join(str(snake_case_ ) for x in self.__claim_vector ) )
print(
'''Initial Available Resources: '''
+ ''' '''.join(str(snake_case_ ) for x in self.__available_resources() ) )
time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
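# Hypothetical usage (class and method names restored for clarity from the
# mangled `__magic_name__` / `_A` above; the three module-level tables are the
# claim vector, allocated-resources table and maximum-claim table):
# BankersAlgorithm(claim_vector, allocated_resources_table,
#                  maximum_claim_table).main(describe=True)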
| 72 | 1 |
'''simple docstring'''
def UpperCamelCase ( lowercase_ : int , lowercase_ : int ) -> int:
'''simple docstring'''
return abs(lowercase_ ) if a == 0 else greatest_common_divisor(b % a , lowercase_ )
def UpperCamelCase ( lowercase_ : int , lowercase_ : int ) -> int:
'''simple docstring'''
while y: # when y reaches 0 the loop terminates and x holds the final GCD
lowercase , lowercase =y, x % y
return abs(lowercase_ )
def UpperCamelCase ( ) -> Union[str, Any]:
'''simple docstring'''
try:
lowercase =input('''Enter two integers separated by comma (,): ''' ).split(''',''' )
lowercase =int(nums[0] )
lowercase =int(nums[1] )
print(
f'greatest_common_divisor({num_a}, {num_a}) = '
f'{greatest_common_divisor(lowercase_ , lowercase_ )}' )
print(f'By iterative gcd({num_a}, {num_a}) = {gcd_by_iterative(lowercase_ , lowercase_ )}' )
except (IndexError, UnboundLocalError, ValueError):
print('''Wrong input''' )
if __name__ == "__main__":
main()
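# Worked trace of the iterative Euclid loop above for (x, y) = (24, 40):
# (24, 40) -> (40, 24) -> (24, 16) -> (16, 8) -> (8, 0), so the GCD is 8,
# matching the recursive variant: greatest_common_divisor(24, 40) == 8.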
| 72 |
'''simple docstring'''
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
_UpperCAmelCase : Dict = version.parse(importlib_metadata.version('''nltk'''))
if NLTK_VERSION >= version.Version('''3.6.4'''):
from nltk import word_tokenize
_UpperCAmelCase : Dict = '''\
@inproceedings{banarjee2005,
title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},
author = {Banerjee, Satanjeev and Lavie, Alon},
booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},
month = jun,
year = {2005},
address = {Ann Arbor, Michigan},
publisher = {Association for Computational Linguistics},
url = {https://www.aclweb.org/anthology/W05-0909},
pages = {65--72},
}
'''
_UpperCAmelCase : Union[str, Any] = '''\
METEOR, an automatic metric for machine translation evaluation
that is based on a generalized concept of unigram matching between the
machine-produced translation and human-produced reference translations.
Unigrams can be matched based on their surface forms, stemmed forms,
and meanings; furthermore, METEOR can be easily extended to include more
advanced matching strategies. Once all generalized unigram matches
between the two strings have been found, METEOR computes a score for
this matching using a combination of unigram-precision, unigram-recall, and
a measure of fragmentation that is designed to directly capture how
well-ordered the matched words in the machine translation are in relation
to the reference.
METEOR gets an R correlation value of 0.347 with human evaluation on the Arabic
data and 0.331 on the Chinese data. This is shown to be an improvement on
using simply unigram-precision, unigram-recall and their harmonic F1
combination.
'''
_UpperCAmelCase : Tuple = '''
Computes METEOR score of translated segments against one or more references.
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
alpha: Parameter for controlling relative weights of precision and recall. default: 0.9
beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3
gamma: Relative weight assigned to fragmentation penalty. default: 0.5
Returns:
'meteor': meteor score.
Examples:
>>> meteor = datasets.load_metric('meteor')
>>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]
>>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]
>>> results = meteor.compute(predictions=predictions, references=references)
>>> print(round(results["meteor"], 4))
0.6944
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __magic_name__ ( datasets.Metric ):
def _A( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'''] , reference_urls=[
'''https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score''',
'''https://en.wikipedia.org/wiki/METEOR''',
] , )
def _A( self , snake_case_ ):
import nltk
nltk.download('''wordnet''' )
if NLTK_VERSION >= version.Version('''3.6.5''' ):
nltk.download('''punkt''' )
if NLTK_VERSION >= version.Version('''3.6.6''' ):
nltk.download('''omw-1.4''' )
def _A( self , snake_case_ , snake_case_ , snake_case_=0.9 , snake_case_=3 , snake_case_=0.5 ):
if NLTK_VERSION >= version.Version('''3.6.5''' ):
lowercase =[
meteor_score.single_meteor_score(
word_tokenize(snake_case_ ) , word_tokenize(snake_case_ ) , alpha=snake_case_ , beta=snake_case_ , gamma=snake_case_ )
for ref, pred in zip(snake_case_ , snake_case_ )
]
else:
lowercase =[
meteor_score.single_meteor_score(snake_case_ , snake_case_ , alpha=snake_case_ , beta=snake_case_ , gamma=snake_case_ )
for ref, pred in zip(snake_case_ , snake_case_ )
]
return {"meteor": np.mean(snake_case_ )}
| 72 | 1 |
'''simple docstring'''
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class __magic_name__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
@register_to_config
def __init__( self , snake_case_ = 1_28 , snake_case_ = 2_56 , snake_case_ = 20_00.0 , snake_case_ = 7_68 , snake_case_ = 12 , snake_case_ = 12 , snake_case_ = 64 , snake_case_ = 20_48 , snake_case_ = 0.1 , ):
super().__init__()
lowercase =nn.Sequential(
nn.Linear(snake_case_ , d_model * 4 , bias=snake_case_ ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=snake_case_ ) , nn.SiLU() , )
lowercase =nn.Embedding(snake_case_ , snake_case_ )
lowercase =False
lowercase =nn.Linear(snake_case_ , snake_case_ , bias=snake_case_ )
lowercase =nn.Dropout(p=snake_case_ )
lowercase =nn.ModuleList()
for lyr_num in range(snake_case_ ):
# FiLM conditional T5 decoder
lowercase =DecoderLayer(d_model=snake_case_ , d_kv=snake_case_ , num_heads=snake_case_ , d_ff=snake_case_ , dropout_rate=snake_case_ )
self.decoders.append(snake_case_ )
lowercase =TaLayerNorm(snake_case_ )
lowercase =nn.Dropout(p=snake_case_ )
lowercase =nn.Linear(snake_case_ , snake_case_ , bias=snake_case_ )
def _A( self , snake_case_ , snake_case_ ):
lowercase =torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def _A( self , snake_case_ , snake_case_ , snake_case_ ):
lowercase , lowercase , lowercase =decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
lowercase =get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
lowercase =self.conditioning_emb(snake_case_ ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
lowercase =decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
lowercase =torch.broadcast_to(
torch.arange(snake_case_ , device=decoder_input_tokens.device ) , (batch, seq_length) , )
lowercase =self.position_encoding(snake_case_ )
lowercase =self.continuous_inputs_projection(snake_case_ )
inputs += position_encodings
lowercase =self.dropout(snake_case_ )
# decoder: No padding present.
lowercase =torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
lowercase =[(x, self.encoder_decoder_mask(snake_case_ , snake_case_ )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
lowercase =torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
lowercase =torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
lowercase =lyr(
snake_case_ , conditioning_emb=snake_case_ , encoder_hidden_states=snake_case_ , encoder_attention_mask=snake_case_ , )[0]
lowercase =self.decoder_norm(snake_case_ )
lowercase =self.post_dropout(snake_case_ )
lowercase =self.spec_out(snake_case_ )
return spec_out
class __magic_name__ ( nn.Module ):
def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_=1E-6 ):
super().__init__()
lowercase =nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=snake_case_ , d_kv=snake_case_ , num_heads=snake_case_ , dropout_rate=snake_case_ ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=snake_case_ , d_kv=snake_case_ , num_heads=snake_case_ , dropout_rate=snake_case_ , layer_norm_epsilon=snake_case_ , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=snake_case_ , d_ff=snake_case_ , dropout_rate=snake_case_ , layer_norm_epsilon=snake_case_ ) )
def _A( self , snake_case_ , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , ):
lowercase =self.layer[0](
snake_case_ , conditioning_emb=snake_case_ , attention_mask=snake_case_ , )
if encoder_hidden_states is not None:
lowercase =torch.where(encoder_attention_mask > 0 , 0 , -1E10 ).to(
encoder_hidden_states.dtype )
lowercase =self.layer[1](
snake_case_ , key_value_states=snake_case_ , attention_mask=snake_case_ , )
# Apply Film Conditional Feed Forward layer
lowercase =self.layer[-1](snake_case_ , snake_case_ )
return (hidden_states,)
class __magic_name__ ( nn.Module ):
def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
super().__init__()
lowercase =TaLayerNorm(snake_case_ )
lowercase =TaFiLMLayer(in_features=d_model * 4 , out_features=snake_case_ )
lowercase =Attention(query_dim=snake_case_ , heads=snake_case_ , dim_head=snake_case_ , out_bias=snake_case_ , scale_qk=snake_case_ )
lowercase =nn.Dropout(snake_case_ )
def _A( self , snake_case_ , snake_case_=None , snake_case_=None , ):
# pre_self_attention_layer_norm
lowercase =self.layer_norm(snake_case_ )
if conditioning_emb is not None:
lowercase =self.FiLMLayer(snake_case_ , snake_case_ )
# Self-attention block
lowercase =self.attention(snake_case_ )
lowercase =hidden_states + self.dropout(snake_case_ )
return hidden_states
class __magic_name__ ( nn.Module ):
def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
super().__init__()
lowercase =Attention(query_dim=snake_case_ , heads=snake_case_ , dim_head=snake_case_ , out_bias=snake_case_ , scale_qk=snake_case_ )
lowercase =TaLayerNorm(snake_case_ , eps=snake_case_ )
lowercase =nn.Dropout(snake_case_ )
def _A( self , snake_case_ , snake_case_=None , snake_case_=None , ):
lowercase =self.layer_norm(snake_case_ )
lowercase =self.attention(
snake_case_ , encoder_hidden_states=snake_case_ , attention_mask=attention_mask.squeeze(1 ) , )
lowercase =hidden_states + self.dropout(snake_case_ )
return layer_output
class __magic_name__ ( nn.Module ):
def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
super().__init__()
lowercase =TaDenseGatedActDense(d_model=snake_case_ , d_ff=snake_case_ , dropout_rate=snake_case_ )
lowercase =TaFiLMLayer(in_features=d_model * 4 , out_features=snake_case_ )
lowercase =TaLayerNorm(snake_case_ , eps=snake_case_ )
lowercase =nn.Dropout(snake_case_ )
def _A( self , snake_case_ , snake_case_=None ):
lowercase =self.layer_norm(snake_case_ )
if conditioning_emb is not None:
lowercase =self.film(snake_case_ , snake_case_ )
lowercase =self.DenseReluDense(snake_case_ )
lowercase =hidden_states + self.dropout(snake_case_ )
return hidden_states
class TaDenseGatedActDense(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate):
        super().__init__()
        # gated GELU needs two separate input projections: wi_0 gates, wi_1 passes through
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()

    def forward(self, hidden_states):
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.wo(hidden_states)
        return hidden_states
class TaLayerNorm(nn.Module):
    def __init__(self, hidden_size, eps=1E-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
        # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
        # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
        # half-precision inputs is done in fp32
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)
        return self.weight * hidden_states
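# worked example of the statistics above (illustrative values, not from the original):
# for a feature row x = [1.0, 2.0, 2.0], mean(x**2) = 3.0, so x * rsqrt(3.0 + eps)
# is roughly [0.577, 1.155, 1.155] before the learned per-feature `weight` scales it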
class NewGELUActivation(nn.Module):
    def forward(self, input):
        # tanh approximation of GELU, as used in the original Google BERT implementation
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))
class TaFiLMLayer(nn.Module):
    def __init__(self, in_features, out_features):
        super().__init__()
        # a single linear layer predicts both the per-feature scale and shift
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
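if __name__ == "__main__":
    # usage sketch for TaFiLMLayer (illustrative shapes, not part of the original module):
    # modulate a (batch, seq, d_model) activation with a conditioning embedding of width d_model * 4
    film = TaFiLMLayer(in_features=512 * 4, out_features=512)
    modulated = film(torch.randn(2, 10, 512), torch.randn(2, 1, 512 * 4))
    print(modulated.shape)  # torch.Size([2, 10, 512])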
| 72 |
'''simple docstring'''
import sys
N = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def solution ( n : str = N ) -> int:
    '''simple docstring'''
    largest_product = -sys.maxsize - 1
    # slide a 13-digit window across the series, keeping the largest product seen
    for i in range(len(n ) - 12 ):
        product = 1
        for j in range(13 ):
            product *= int(n[i + j] )
        if product > largest_product:
            largest_product = product
    return largest_product
if __name__ == "__main__":
print(F"""{solution() = }""")
| 72 | 1 |
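The same search reads more directly as a sliding-window maximum; a hedged alternative sketch (assumes the thousand-digit string `N` from the row above; `math.prod` needs Python 3.8+):

import math

def largest_window_product(digits: str, width: int = 13) -> int:
    # maximum product over every contiguous window of `width` digits
    return max(
        math.prod(int(d) for d in digits[i : i + width])
        for i in range(len(digits) - width + 1)
    )

print(largest_window_product(N))  # same answer as solution() above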
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
class TextSummarizationTool(PipelineTool):
    default_checkpoint = 'philschmid/bart-large-cnn-samsum'
    description = (
        'This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, '
        'and returns a summary of the text.'
    )
    name = 'summarizer'
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM

    inputs = ['text']
    outputs = ['text']

    def encode(self, text):
        return self.pre_processor(text, return_tensors='pt', truncation=True)

    def forward(self, inputs):
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
| 72 |
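Stripped of the tool wrapper, the row above is a plain encode-generate-decode loop; an equivalent hedged sketch using standard transformers calls (same checkpoint as the row):

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("philschmid/bart-large-cnn-samsum")
model = AutoModelForSeq2SeqLM.from_pretrained("philschmid/bart-large-cnn-samsum")

inputs = tokenizer("Long English text to summarize ...", return_tensors="pt", truncation=True)
summary_ids = model.generate(**inputs)[0]
print(tokenizer.decode(summary_ids, skip_special_tokens=True))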
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
logger = logging.get_logger(__name__)
class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            'The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use YolosImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 72 | 1 |
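The row above follows the standard deprecation-shim pattern: keep the old name importable, emit a FutureWarning on construction, and delegate everything to the replacement class. A self-contained sketch with placeholder names:

import warnings

class NewProcessor:
    # stand-in for the replacement class (YolosImageProcessor in the row above)
    def __init__(self, size: int = 224) -> None:
        self.size = size

class OldFeatureExtractor(NewProcessor):
    # deprecated alias kept only for backward compatibility
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)

OldFeatureExtractor()  # emits the FutureWarning, then behaves like NewProcessor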
'''simple docstring'''
from numpy import exp, pi, sqrt
def UpperCamelCase ( x : float , mu : float = 0.0 , sigma : float = 1.0 ) -> float:
    '''simple docstring'''
    return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 72 |
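A quick numerical check of the density above: at x = mu the exponent vanishes, so the standard normal peaks at 1/sqrt(2*pi) ≈ 0.3989 (calls assume the function defined in the row):

print(UpperCamelCase(0.0))            # peak of the standard normal, ≈ 0.3989
print(UpperCamelCase(1.0, 0.0, 1.0))  # one sigma out, ≈ 0.2420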
'''simple docstring'''
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
    "facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}
class EncodecConfig(PretrainedConfig):
    model_type = "encodec"

    def __init__(self, target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0], sampling_rate=24000, audio_channels=1, normalize=False, chunk_length_s=None, overlap=None, hidden_size=128, num_filters=32, num_residual_layers=1, upsampling_ratios=[8, 5, 4, 2], norm_type="weight_norm", kernel_size=7, last_kernel_size=7, residual_kernel_size=3, dilation_growth_rate=2, use_causal_conv=True, pad_mode="reflect", compress=2, num_lstm_layers=2, trim_right_ratio=1.0, codebook_size=1024, codebook_dim=None, use_conv_shortcut=True, **kwargs):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut
        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'self.norm_type must be one of `"weight_norm"` or `"time_group_norm"`, got {self.norm_type}')
        super().__init__(**kwargs)
    @property
    def chunk_length(self):
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self):
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    @property
    def frame_rate(self):
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_quantizers(self):
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
| 72 | 1 |
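Plugging the 24 kHz defaults into the derived properties above (values computed here for illustration): the hop length is the product of the upsampling ratios, the frame rate follows from the sampling rate, and the quantizer count from the top bandwidth.

import math
import numpy as np

sampling_rate = 24000
upsampling_ratios = [8, 5, 4, 2]
target_bandwidths = [1.5, 3.0, 6.0, 12.0, 24.0]

hop_length = int(np.prod(upsampling_ratios))        # 8 * 5 * 4 * 2 = 320 samples per frame
frame_rate = math.ceil(sampling_rate / hop_length)  # ceil(24000 / 320) = 75 frames/s
num_quantizers = int(1000 * target_bandwidths[-1] // (frame_rate * 10))  # 24000 // 750 = 32
print(hop_length, frame_rate, num_quantizers)       # 320 75 32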
'''simple docstring'''
from __future__ import annotations
def resistor_parallel ( resistors : list[float] ) -> float:
    '''simple docstring'''
    # equivalent resistance of resistors in parallel: 1 / (1/R1 + 1/R2 + ...)
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f'Resistor at index {index} has a negative or zero value!'
            raise ValueError(msg )
        first_sum += 1 / float(resistor )
        index += 1
    return 1 / first_sum
def resistor_series ( resistors : list[float] ) -> float:
    '''simple docstring'''
    # equivalent resistance of resistors in series: R1 + R2 + ...
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f'Resistor at index {index} has a negative value!'
            raise ValueError(msg )
        index += 1
    return sum_r
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 72 |
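A quick check of the two helpers above: two 10 Ω resistors combine to 5 Ω in parallel and 20 Ω in series.

print(resistor_parallel([10.0, 10.0]))  # 1 / (1/10 + 1/10) = 5.0
print(resistor_series([10.0, 10.0]))    # 10 + 10 = 20.0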
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)
_import_structure = {
    "configuration_blip": [
        "BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlipConfig",
        "BlipTextConfig",
        "BlipVisionConfig",
    ],
    "processing_blip": ["BlipProcessor"],
}
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_blip"] = ["BlipImageProcessor"]
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blip"] = [
        "BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlipModel",
        "BlipPreTrainedModel",
        "BlipForConditionalGeneration",
        "BlipForQuestionAnswering",
        "BlipVisionModel",
        "BlipTextModel",
        "BlipForImageTextRetrieval",
    ]
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blip"] = [
        "TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFBlipModel",
        "TFBlipPreTrainedModel",
        "TFBlipForConditionalGeneration",
        "TFBlipForQuestionAnswering",
        "TFBlipVisionModel",
        "TFBlipTextModel",
        "TFBlipForImageTextRetrieval",
    ]
if TYPE_CHECKING:
    from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
    from .processing_blip import BlipProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_blip import BlipImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip import (
            BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlipForConditionalGeneration,
            BlipForImageTextRetrieval,
            BlipForQuestionAnswering,
            BlipModel,
            BlipPreTrainedModel,
            BlipTextModel,
            BlipVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blip import (
            TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFBlipForConditionalGeneration,
            TFBlipForImageTextRetrieval,
            TFBlipForQuestionAnswering,
            TFBlipModel,
            TFBlipPreTrainedModel,
            TFBlipTextModel,
            TFBlipVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 72 | 1 |
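The row above relies on transformers' `_LazyModule`: nothing heavy is imported until an attribute is first touched, so importing the package stays cheap even when torch or TF is absent. A minimal standalone sketch of the same idea using PEP 562 module-level `__getattr__` (structure simplified; `_LazyModule` itself does more bookkeeping):

# hypothetical package __init__.py
import importlib
from typing import Any

_import_structure = {
    "configuration_blip": ["BlipConfig"],
    "modeling_blip": ["BlipModel"],  # only importable once torch is installed
}

# invert the mapping once: public attribute name -> defining submodule
_attr_to_module = {
    attr: module for module, attrs in _import_structure.items() for attr in attrs
}

def __getattr__(name: str) -> Any:
    # called only when `name` is not found through normal lookup (PEP 562)
    if name in _attr_to_module:
        submodule = importlib.import_module(f".{_attr_to_module[name]}", __name__)
        return getattr(submodule, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")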