code | code_codestyle | style_context | style_context_codestyle | label
|---|---|---|---|---|
"""simple docstring"""
from __future__ import annotations
def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    # Swap the two elements if they are out of order for the given direction
    # (1 = ascending, 0 = descending).
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]
def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)
def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print("\nSorted array in ascending order is: ", end="")
print(*unsorted, sep=", ")
bitonic_merge(unsorted, 0, len(unsorted), 0)
print("Sorted array in descending order is: ", end="")
print(*unsorted, sep=", ")
| 363 | """simple docstring"""
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate a polynomial (coefficients in increasing order of power) at x."""
    return sum(c * (x**i) for i, c in enumerate(poly))
def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate the same polynomial with Horner's method (one pass, no powers)."""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result
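# Worked example: for poly = (0.0, 0.0, 5.0, 9.3, 7.0) and x = 10.0 both functions
# compute 5.0*10**2 + 9.3*10**3 + 7.0*10**4 = 500 + 9300 + 70000 = 79800.0.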
if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
| 363 | 1 |
"""simple docstring"""
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
"""--original_config_file""",
default=None,
type=str,
help="""The YAML config file corresponding to the original architecture.""",
)
parser.add_argument(
"""--num_in_channels""",
default=None,
type=int,
help="""The number of input channels. If `None` number of input channels will be automatically inferred.""",
)
parser.add_argument(
"""--scheduler_type""",
default="""pndm""",
type=str,
help="""Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']""",
)
parser.add_argument(
"""--pipeline_type""",
default=None,
type=str,
help=(
"""The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\'"""
""". If `None` pipeline will be automatically inferred."""
),
)
parser.add_argument(
"""--image_size""",
default=None,
type=int,
help=(
"""The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"""
""" Base. Use 768 for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--prediction_type""",
default=None,
type=str,
help=(
"""The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable"""
""" Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--extract_ema""",
action="""store_true""",
help=(
"""Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"""
""" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"""
""" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."""
),
)
parser.add_argument(
"""--upcast_attention""",
action="""store_true""",
help=(
"""Whether the attention computation should always be upcasted. This is necessary when running stable"""
""" diffusion 2.1."""
),
)
parser.add_argument(
"""--from_safetensors""",
action="""store_true""",
help="""If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.""",
)
parser.add_argument(
"""--to_safetensors""",
action="""store_true""",
help="""Whether to store pipeline in safetensors format or not.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
parser.add_argument(
"""--stable_unclip""",
type=str,
default=None,
required=False,
help="""Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.""",
)
parser.add_argument(
"""--stable_unclip_prior""",
type=str,
default=None,
required=False,
help="""Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.""",
)
parser.add_argument(
"""--clip_stats_path""",
type=str,
help="""Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.""",
required=False,
)
parser.add_argument(
"""--controlnet""", action="""store_true""", default=None, help="""Set flag if this is a controlnet checkpoint."""
)
parser.add_argument("""--half""", action="""store_true""", help="""Save weights in half precision.""")
parser.add_argument(
"""--vae_path""",
type=str,
default=None,
required=False,
help="""Set to a path, hub id to an already converted vae to not convert it again.""",
)
    args = parser.parse_args()
    pipe = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
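    # Example invocation (a sketch: the script name and all paths are placeholders):
    #   python convert_original_stable_diffusion_to_diffusers.py \
    #       --checkpoint_path ./v1-5.ckpt \
    #       --original_config_file ./v1-inference.yaml \
    #       --dump_path ./sd-diffusers --to_safetensors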
| 713 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_xlm""": ["""XLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLMConfig""", """XLMOnnxConfig"""],
"""tokenization_xlm""": ["""XLMTokenizer"""],
}
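# The try/except blocks below register the PyTorch and TensorFlow model classes
# only when the corresponding backend is installed; otherwise they are skipped.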
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
"""XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMForMultipleChoice""",
"""XLMForQuestionAnswering""",
"""XLMForQuestionAnsweringSimple""",
"""XLMForSequenceClassification""",
"""XLMForTokenClassification""",
"""XLMModel""",
"""XLMPreTrainedModel""",
"""XLMWithLMHeadModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
"""TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLMForMultipleChoice""",
"""TFXLMForQuestionAnsweringSimple""",
"""TFXLMForSequenceClassification""",
"""TFXLMForTokenClassification""",
"""TFXLMMainLayer""",
"""TFXLMModel""",
"""TFXLMPreTrainedModel""",
"""TFXLMWithLMHeadModel""",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 36 | 0 |
from manim import *
class Stage5(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)
        meta_mem = Rectangle(height=0.25, width=0.25)
        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)
        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)
        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)
        model_arr = []
        model_cpu_arr = []
        for i, rect in enumerate(model_base):
            target = fill.copy().set_fill(BLUE, opacity=0.8)
            target.move_to(rect)
            model_arr.append(target)
            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(BLUE, opacity=0.8)
            cpu_target.move_to(cpu_left_col_base[i])
            model_cpu_arr.append(cpu_target)
        self.add(*model_arr, *model_cpu_arr)
        disk_left_col_base = [meta_mem.copy() for i in range(6)]
        disk_right_col_base = [meta_mem.copy() for i in range(6)]
        disk_left_col = VGroup(*disk_left_col_base).arrange(UP, buff=0)
        disk_right_col = VGroup(*disk_right_col_base).arrange(UP, buff=0)
        disk_rects = VGroup(disk_left_col, disk_right_col).arrange(RIGHT, buff=0)
        disk_text = Text("Disk", font_size=24)
        disk = Group(disk_rects, disk_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        disk.move_to([-4, -1.25, 0])
        self.add(disk_text, disk_rects)
        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model", font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])
        self.add(key, key_text)
        blue_text = MarkupText(
            f"<span fgcolor='{BLUE}'>●</span> Checkpoint", font_size=18,
        )
        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())
        self.add(blue_text)
        step_1 = MarkupText(
            "Now watch as an input is passed through the model\nand how the memory is utilized and handled.",
            font_size=24,
        )
        step_1.move_to([2, 2, 0])
        self.play(Write(step_1))
        input = Square(0.3)
        input.set_fill(RED, opacity=1.0)
        input.set_stroke(width=0.0)
        input.next_to(model_base[0], LEFT, buff=0.5)
        self.play(Write(input))
        input.generate_target()
        input.target.next_to(model_arr[0], direction=LEFT, buff=0.02)
        self.play(MoveToTarget(input))
        self.play(FadeOut(step_1))
        a = Arrow(start=UP, end=DOWN, color=RED, buff=0.5)
        a.next_to(model_arr[0].get_left(), UP, buff=0.2)
        model_cpu_arr[0].generate_target()
        model_cpu_arr[0].target.move_to(gpu_rect[0])
        step_2 = MarkupText(
            "As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.",
            font_size=24,
        )
        step_2.move_to([2, 2, 0])
        self.play(Write(step_2, run_time=3))
        circ_kwargs = {"run_time": 1, "fade_in": True, "fade_out": True, "buff": 0.02}
        self.play(
            Write(a),
            Circumscribe(model_arr[0], color=ORANGE, **circ_kwargs),
            Circumscribe(model_cpu_arr[0], color=ORANGE, **circ_kwargs),
            Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
        )
        self.play(MoveToTarget(model_cpu_arr[0]))
        a_c = a.copy()
for i in range(6 ):
            a_c.next_to(model_arr[i].get_right() + 0.02, UP, buff=0.2)
            input.generate_target()
            input.target.move_to(model_arr[i].get_right() + 0.02)
            grp = AnimationGroup(
                FadeOut(a_c, run_time=0.5), MoveToTarget(input, run_time=0.5), FadeIn(a_c, run_time=0.5), lag_ratio=0.2
            )
            self.play(grp)
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
                    circ_kwargs["run_time"] = 0.7
                self.play(
                    Circumscribe(model_arr[i], **circ_kwargs),
                    Circumscribe(cpu_left_col_base[i], **circ_kwargs),
                    Circumscribe(cpu_left_col_base[i + 1], color=ORANGE, **circ_kwargs),
                    Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
                    Circumscribe(model_arr[i + 1], color=ORANGE, **circ_kwargs),
                )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
                self.play(
                    Circumscribe(model_arr[-1], color=ORANGE, **circ_kwargs),
                    Circumscribe(cpu_left_col_base[-1], color=ORANGE, **circ_kwargs),
                    Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
                )
self.play(MoveToTarget(model_cpu_arr[i] ) )
        a = a_c
        a_c = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
        self.play(
            FadeOut(a), FadeOut(input, run_time=0.5),
        )
snake_case_ = MarkupText(f"Inference on a model too large for GPU memory\nis successfully completed." , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(__lowercase , run_time=3 ) , MoveToTarget(__lowercase ) )
self.wait()
| 376 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
lowercase__ : Dict = 16
lowercase__ : str = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    """Build train and eval dataloaders for the GLUE MRPC task."""
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
# max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(_A , padding="max_length" , max_length=128 , return_tensors="pt" )
return tokenizer.pad(_A , padding="longest" , return_tensors="pt" )
# Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )
return train_dataloader, eval_dataloader
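# Sketch of standalone usage (assumes an Accelerator has already been created):
#   accelerator = Accelerator()
#   train_dl, eval_dl = get_dataloaders(accelerator, batch_size=16)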
def training_function(config, args):
    """Train and evaluate a sequence-classification model with Accelerate."""
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name_or_path = args.model_name_or_path
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name_or_path)
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path, return_dict=True)
# Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)
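    # When the DeepSpeed config supplies its own scheduler, DummyScheduler is only a
    # placeholder so `accelerator.prepare` can wire in the real one from the config.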
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    # Now we train the model
    metric = evaluate.load("glue", "mrpc")
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch["labels"])
            )  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
else:
samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions, references=references,
            )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
        performance_metric[f"epoch-{epoch}"] = eval_metric["accuracy"]
        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric["accuracy"]
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), f"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"
accelerator.wait_for_everyone()
if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
            json.dump(performance_metric, f)
def main():
    """Parse command-line arguments and launch training."""
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path", type=str, default="bert-base-cased", help="Path to pretrained model or model identifier from huggingface.co/models.", required=False,
    )
    parser.add_argument(
        "--output_dir", type=str, default=".", help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--performance_lower_bound", type=float, default=None, help="Optional lower bound for the performance metric. If set, the training will throw an error when the performance metric drops below this value.",
    )
    parser.add_argument(
        "--num_epochs", type=int, default=3, help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 376 | 1 |
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
__A = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
    """Image classification pipeline using any `AutoModelForImageClassification` model."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
requires_backends(self , 'vision' )
self.check_model_type(
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
if self.framework == 'tf'
else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING )
    def _sanitize_parameters(self, top_k=None):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params
    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)
    def preprocess(self, image):
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs
    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs
    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 718 |
"""simple docstring"""
import gc
import math
import unittest
import torch
from diffusers import UNet2DModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
__A = logging.get_logger(__name__)
enable_full_determinism()
class Unet2DModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"
    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)
        return {"sample": noise, "timestep": time_step}
    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)
    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": (32, 64),
            "down_block_types": ("DownBlock2D", "AttnDownBlock2D"),
            "up_block_types": ("AttnUpBlock2D", "UpBlock2D"),
            "attention_head_dim": 3,
            "out_channels": 3,
            "in_channels": 3,
            "layers_per_block": 2,
            "sample_size": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
class UNetLDMModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"
    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 4
        sizes = (32, 32)
        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)
        return {"sample": noise, "timestep": time_step}
    @property
    def input_shape(self):
        return (4, 32, 32)

    @property
    def output_shape(self):
        return (4, 32, 32)
    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "sample_size": 32,
            "in_channels": 4,
            "out_channels": 4,
            "layers_per_block": 2,
            "block_out_channels": (32, 64),
            "attention_head_dim": 32,
            "down_block_types": ("DownBlock2D", "DownBlock2D"),
            "up_block_types": ("UpBlock2D", "UpBlock2D"),
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)
        model.to(torch_device)
        image = model(**self.dummy_input).sample
        assert image is not None, "Make sure output is not None"
    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate(self):
        model, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model.to(torch_device)
        image = model(**self.dummy_input).sample
        assert image is not None, "Make sure output is not None"
    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate_wont_change_results(self):
        model_accelerate, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model_accelerate.to(torch_device)
        model_accelerate.eval()
        noise = torch.randn(
            1,
            model_accelerate.config.in_channels,
            model_accelerate.config.sample_size,
            model_accelerate.config.sample_size,
            generator=torch.manual_seed(0),
        )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)
        arr_accelerate = model_accelerate(noise, time_step)["sample"]
        # two models don't need to stay in the device at the same time
        del model_accelerate
        torch.cuda.empty_cache()
        gc.collect()
        model_normal_load, _ = UNet2DModel.from_pretrained(
            "fusing/unet-ldm-dummy-update", output_loading_info=True, low_cpu_mem_usage=False
        )
        model_normal_load.to(torch_device)
        model_normal_load.eval()
        arr_normal_load = model_normal_load(noise, time_step)["sample"]
        assert torch_all_close(arr_accelerate, arr_normal_load, rtol=1e-3)
    def test_output_pretrained(self):
        model = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update")
        model.eval()
        model.to(torch_device)
        noise = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)
        with torch.no_grad():
            output = model(noise, time_step).sample
        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800])
        # fmt: on
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-3))
class NCSNppModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3
        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [10]).to(dtype=torch.int32, device=torch_device)
        return {"sample": noise, "timestep": time_step}
    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)
    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64, 64, 64],
            "in_channels": 3,
            "layers_per_block": 1,
            "out_channels": 3,
            "time_embedding_type": "fourier",
            "norm_eps": 1e-6,
            "mid_block_scale_factor": math.sqrt(2.0),
            "norm_num_groups": None,
            "down_block_types": [
                "SkipDownBlock2D",
                "AttnSkipDownBlock2D",
                "SkipDownBlock2D",
                "SkipDownBlock2D",
            ],
            "up_block_types": [
                "SkipUpBlock2D",
                "SkipUpBlock2D",
                "AttnSkipUpBlock2D",
                "SkipUpBlock2D",
            ],
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    @slow
    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)
        model.to(torch_device)
        inputs = self.dummy_input
        noise = floats_tensor((4, 3) + (256, 256)).to(torch_device)
        inputs["sample"] = noise
        image = model(**inputs)
        assert image is not None, "Make sure output is not None"
    @slow
    def test_output_pretrained_ve_mid(self):
        model = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")
        model.to(torch_device)
        batch_size = 4
        num_channels = 3
        sizes = (256, 256)
        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)
        with torch.no_grad():
            output = model(noise, time_step).sample
        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -10980.7129, -20028.8535, 8148.2822, 2342.2905, 567.7608])
        # fmt: on
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
    def test_output_pretrained_ve_large(self):
        model = UNet2DModel.from_pretrained("fusing/ncsnpp-ffhq-ve-dummy-update")
        model.to(torch_device)
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)
        with torch.no_grad():
            output = model(noise, time_step).sample
        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256])
        # fmt: on
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
    def test_forward_with_norm_groups(self):
        # not required for this model
        pass
| 560 | 0 |
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
    DataCollatorForSeq2Seq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 280 |
def is_number_palindrome(num: int) -> bool:
    """
    Return True if ``num`` reads the same forwards and backwards.

    >>> is_number_palindrome(121)
    True
    >>> is_number_palindrome(123)
    False
    >>> is_number_palindrome(-121)
    False
    """
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num
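# Note: single-digit numbers (including 0) are palindromes; negatives are not,
# since the leading minus sign has no counterpart at the end.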
if __name__ == "__main__":
import doctest
doctest.testmod()
| 381 | 0 |
"""simple docstring"""
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
lowerCAmelCase__ = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str):
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    special_keys = ["key_proj", "value_proj", "query_proj"]
    mapping = {
'''self_attn''': '''ngram_self_attn''',
'''cross_attn''': '''encoder_attn''',
'''cross_attn_layer_norm''': '''encoder_attn_layer_norm''',
'''feed_forward_layer_norm''': '''final_layer_norm''',
'''feed_forward''': '''''',
'''intermediate''': '''fc1''',
'''output''': '''fc2''',
'''key_proj''': '''k_proj''',
'''query_proj''': '''q_proj''',
'''value_proj''': '''v_proj''',
'''word_embeddings''': '''embed_tokens''',
'''embeddings_layer_norm''': '''emb_layer_norm''',
'''relative_pos_embeddings''': '''relative_linear''',
'''ngram_embeddings''': '''ngram_input_embed''',
'''position_embeddings''': '''embed_positions''',
}
for key in loading_info["missing_keys"]:
SCREAMING_SNAKE_CASE_ = key.split('''.''' )
if attributes[0] == "lm_head":
SCREAMING_SNAKE_CASE_ = prophet
SCREAMING_SNAKE_CASE_ = prophet_old
else:
SCREAMING_SNAKE_CASE_ = prophet.prophetnet
SCREAMING_SNAKE_CASE_ = prophet_old.model
SCREAMING_SNAKE_CASE_ = False
for attribute in attributes:
if attribute in mapping:
SCREAMING_SNAKE_CASE_ = mapping[attribute]
if not hasattr(__a, __a ) and len(__a ) > 0:
SCREAMING_SNAKE_CASE_ = attribute
elif hasattr(__a, __a ):
SCREAMING_SNAKE_CASE_ = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
SCREAMING_SNAKE_CASE_ = old_model.weight
logger.info(F'{attribute} is initialized.' )
SCREAMING_SNAKE_CASE_ = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
SCREAMING_SNAKE_CASE_ = old_model.bias
logger.info(F'{attribute} is initialized' )
SCREAMING_SNAKE_CASE_ = True
break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    param.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    param.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    param.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    param.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    param.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    param.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
SCREAMING_SNAKE_CASE_ = nn.Parameter(old_model.embed_positions.weight[:512, :] )
SCREAMING_SNAKE_CASE_ = True
break
            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(attribute)]
            else:
                model = getattr(model, attribute)
                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)
        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")
    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path) | 704 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]
    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]
    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352
# set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )
    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name
    return name
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    # fmt: off
    model_name_to_url = {
'''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''',
'''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''',
'''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''',
'''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''',
'''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''',
'''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''',
'''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''',
'''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''',
'''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''',
'''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''',
}
# fmt: on
    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()
    # load state dict
    model.load_state_dict(state_dict)
# verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")
    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)
    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)
    outputs = model(**inputs)
    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])
    print("First values of logits:", outputs.logits[0, :3])
if model_name == "focalnet-tiny":
SCREAMING_SNAKE_CASE_ = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] )
elif model_name == "focalnet-tiny-lrf":
SCREAMING_SNAKE_CASE_ = torch.tensor([1.1_6_6_9, 0.0_1_2_5, -0.1_6_9_5] )
elif model_name == "focalnet-small":
SCREAMING_SNAKE_CASE_ = torch.tensor([0.4_9_1_7, -0.0_4_3_0, 0.1_3_4_1] )
elif model_name == "focalnet-small-lrf":
SCREAMING_SNAKE_CASE_ = torch.tensor([-0.2_5_8_8, -0.5_3_4_2, -0.2_3_3_1] )
elif model_name == "focalnet-base":
SCREAMING_SNAKE_CASE_ = torch.tensor([-0.1_6_5_5, -0.4_0_9_0, -0.1_7_3_0] )
elif model_name == "focalnet-base-lrf":
SCREAMING_SNAKE_CASE_ = torch.tensor([0.5_3_0_6, -0.0_4_8_3, -0.3_9_2_8] )
assert torch.allclose(outputs.logits[0, :3], __a, atol=1E-4 )
print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='focalnet-tiny',
type=str,
help='Name of the FocalNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub.',
)
    args = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 628 | 0 |
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    """Return True if ``number`` is prime, using deterministic 6k ± 1 trial division."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
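# Example: is_prime(29) only has to test divisibility by 5 and 7
# (the 6k ± 1 candidates up to sqrt(29)) before returning True.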
odd_composites = [num for num in range(3, 100001, 2) if not is_prime(num)]
def compute_nums(n: int) -> list[int]:
    """Return the first ``n`` odd composites that cannot be written as prime + 2*i*i."""
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")
    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
            if len(list_nums) == n:
                return list_nums
    return []
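# Project Euler 46 ("Goldbach's other conjecture"): compute_nums(1)[0] is 5777,
# the smallest odd composite that is not a prime plus twice a square.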
def solution() -> int:
    """Return the smallest counterexample to Goldbach's other conjecture."""
    return compute_nums(1)[0]
if __name__ == "__main__":
print(F"""{solution() = }""")
| 61 |
'''simple docstring'''
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def get_videomae_config(model_name):
    config = VideoMAEConfig()
    set_architecture_configs(model_name, config)
if "finetuned" not in model_name:
A_ = False
if "finetuned" in model_name:
A_ = """huggingface/label-files"""
if "kinetics" in model_name:
A_ = 4_00
A_ = """kinetics400-id2label.json"""
elif "ssv2" in model_name:
A_ = 1_74
A_ = """something-something-v2-id2label.json"""
else:
raise ValueError("""Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.""" )
A_ = json.load(open(hf_hub_download(UpperCAmelCase__, UpperCAmelCase__, repo_type="""dataset""" ), """r""" ) )
A_ = {int(UpperCAmelCase__ ): v for k, v in idalabel.items()}
A_ = idalabel
A_ = {v: k for k, v in idalabel.items()}
return config
def set_architecture_configs(model_name, config):
    if "small" in model_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 3
        config.decoder_hidden_size = 192
        config.decoder_intermediate_size = 768
    elif "large" in model_name:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 512
        config.decoder_intermediate_size = 2048
    elif "huge" in model_name:
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 640
        config.decoder_intermediate_size = 2560
    elif "base" not in model_name:
        raise ValueError('Model name should include either "small", "base", "large", or "huge"')
def rename_key(name):
    if "encoder." in name:
        name = name.replace("encoder.", "")
    if "cls_token" in name:
        name = name.replace("cls_token", "videomae.embeddings.cls_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "videomae.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "videomae.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "videomae.embeddings.norm")
    if "decoder.blocks" in name:
        name = name.replace("decoder.blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "videomae.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name and "bias" not in name:
        name = name.replace("attn", "attention.self")
    if "attn" in name:
        name = name.replace("attn", "attention.attention")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.weight", "videomae.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.bias", "videomae.layernorm.bias")
    if "head" in name and "decoder" not in name:
        name = name.replace("head", "classifier")
    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if key.startswith("encoder."):
            key = key.replace("encoder.", "")
        if "qkv" in key:
            key_split = key.split(".")
            if key.startswith("decoder.blocks"):
                dim = config.decoder_hidden_size
                layer_num = int(key_split[2])
                prefix = "decoder.decoder_layers."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                dim = config.hidden_size
                layer_num = int(key_split[1])
                prefix = "videomae.encoder.layer."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
def convert_videomae_checkpoint(checkpoint_url, pytorch_dump_folder_path, model_name, push_to_hub):
    config = get_videomae_config(model_name)
    if "finetuned" in model_name:
        model = VideoMAEForVideoClassification(config)
    else:
        model = VideoMAEForPreTraining(config)
    # download original checkpoint, hosted on Google Drive
    output = "pytorch_model.bin"
    gdown.cached_download(checkpoint_url, output, quiet=False)
    files = torch.load(output, map_location="cpu")
    if "model" in files:
        state_dict = files["model"]
    else:
        state_dict = files["module"]
    new_state_dict = convert_state_dict(state_dict, config)
    model.load_state_dict(new_state_dict)
    model.eval()
    # verify model on basic input
    image_processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
    video = prepare_video()
    inputs = image_processor(video, return_tensors="pt")
    if "finetuned" not in model_name:
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)
    outputs = model(**inputs)
    logits = outputs.logits
    model_names = [
"""videomae-small-finetuned-kinetics""",
"""videomae-small-finetuned-ssv2""",
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
"""videomae-base-short""",
"""videomae-base-short-finetuned-kinetics""",
"""videomae-base""",
"""videomae-base-finetuned-kinetics""",
"""videomae-large""",
"""videomae-large-finetuned-kinetics""",
"""videomae-huge-finetuned-kinetics""",
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
"""videomae-base-short-ssv2""",
"""videomae-base-short-finetuned-ssv2""",
"""videomae-base-ssv2""",
"""videomae-base-finetuned-ssv2""",
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
if model_name == "videomae-small-finetuned-kinetics":
A_ = torch.Size([1, 4_00] )
A_ = torch.tensor([-0.9_291, -0.4_061, -0.9_307] )
elif model_name == "videomae-small-finetuned-ssv2":
A_ = torch.Size([1, 1_74] )
A_ = torch.tensor([0.2_671, -0.4_689, -0.8_235] )
elif model_name == "videomae-base":
A_ = torch.Size([1, 14_08, 15_36] )
A_ = torch.tensor([[0.7_739, 0.7_968, 0.7_089], [0.6_701, 0.7_487, 0.6_209], [0.4_287, 0.5_158, 0.4_773]] )
elif model_name == "videomae-base-short":
A_ = torch.Size([1, 14_08, 15_36] )
A_ = torch.tensor([[0.7_994, 0.9_612, 0.8_508], [0.7_401, 0.8_958, 0.8_302], [0.5_862, 0.7_468, 0.7_325]] )
# we verified the loss both for normalized and unnormalized targets for this one
A_ = torch.tensor([0.5_142] ) if config.norm_pix_loss else torch.tensor([0.6_469] )
elif model_name == "videomae-large":
A_ = torch.Size([1, 14_08, 15_36] )
A_ = torch.tensor([[0.7_149, 0.7_997, 0.6_966], [0.6_768, 0.7_869, 0.6_948], [0.5_139, 0.6_221, 0.5_605]] )
elif model_name == "videomae-large-finetuned-kinetics":
A_ = torch.Size([1, 4_00] )
A_ = torch.tensor([0.0_771, 0.0_011, -0.3_625] )
elif model_name == "videomae-huge-finetuned-kinetics":
A_ = torch.Size([1, 4_00] )
A_ = torch.tensor([0.2_433, 0.1_632, -0.4_894] )
elif model_name == "videomae-base-short-finetuned-kinetics":
A_ = torch.Size([1, 4_00] )
A_ = torch.tensor([0.6_588, 0.0_990, -0.2_493] )
elif model_name == "videomae-base-finetuned-kinetics":
A_ = torch.Size([1, 4_00] )
A_ = torch.tensor([0.3_669, -0.0_688, -0.2_421] )
elif model_name == "videomae-base-short-ssv2":
A_ = torch.Size([1, 14_08, 15_36] )
A_ = torch.tensor([[0.4_712, 0.5_296, 0.5_786], [0.2_278, 0.2_729, 0.4_026], [0.0_352, 0.0_730, 0.2_506]] )
elif model_name == "videomae-base-short-finetuned-ssv2":
A_ = torch.Size([1, 1_74] )
A_ = torch.tensor([-0.0_537, -0.1_539, -0.3_266] )
elif model_name == "videomae-base-ssv2":
A_ = torch.Size([1, 14_08, 15_36] )
A_ = torch.tensor([[0.8_131, 0.8_727, 0.8_546], [0.7_366, 0.9_377, 0.8_870], [0.5_935, 0.8_874, 0.8_564]] )
elif model_name == "videomae-base-finetuned-ssv2":
A_ = torch.Size([1, 1_74] )
A_ = torch.tensor([0.1_961, -0.8_337, -0.6_389] )
else:
raise ValueError(F'''Model name not supported. Should be one of {model_names}''' )
# verify logits
assert logits.shape == expected_shape
if "finetuned" in model_name:
assert torch.allclose(logits[0, :3], UpperCAmelCase__, atol=1e-4 )
else:
print("""Logits:""", logits[0, :3, :3] )
assert torch.allclose(logits[0, :3, :3], UpperCAmelCase__, atol=1e-4 )
print("""Logits ok!""" )
# verify loss, if applicable
if model_name == "videomae-base-short":
A_ = outputs.loss
assert torch.allclose(UpperCAmelCase__, UpperCAmelCase__, atol=1e-4 )
print("""Loss ok!""" )
if pytorch_dump_folder_path is not None:
print(F'''Saving model and image processor to {pytorch_dump_folder_path}''' )
        image_processor.save_pretrained(pytorch_dump_folder_path )
        model.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
print("""Pushing to the hub...""" )
        model.push_to_hub(model_name, organization="""nielsr""" )
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4''',
type=str,
help=(
'''URL of the original PyTorch checkpoint (on Google Drive) you\'d like to convert. Should be a direct'''
''' download link.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''/Users/nielsrogge/Documents/VideoMAE/Test''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--model_name''', default='''videomae-base''', type=str, help='''Name of the model.''')
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
__lowerCamelCase = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
snake_case_ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( BaseImageProcessor ):
    model_input_names = ['pixel_values']

    def __init__(self , do_rescale : bool = True , rescale_factor : Union[int, float] = 1 / 255 , do_pad : bool = True , pad_size : int = 8 , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size
    def rescale(self , image : np.ndarray , scale : float , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs ):
        """simple docstring"""
        return rescale(image , scale=scale , data_format=data_format , **kwargs )

    def pad(self , image : np.ndarray , size : int , data_format : Optional[Union[str, ChannelDimension]] = None ):
        """simple docstring"""
        # pad the bottom/right edges symmetrically up to the next multiple of `size`
        old_height , old_width = get_image_size(image )
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image , ((0, pad_height), (0, pad_width)) , mode='''symmetric''' , data_format=data_format )
    def preprocess(self , images : ImageInput , do_rescale : Optional[bool] = None , rescale_factor : Optional[float] = None , do_pad : Optional[bool] = None , pad_size : Optional[int] = None , return_tensors : Optional[Union[str, TensorType]] = None , data_format : Union[str, ChannelDimension] = ChannelDimension.FIRST , **kwargs , ):
        """simple docstring"""
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_pad:
            images = [self.pad(image , size=pad_size ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {'''pixel_values''': images}
        return BatchFeature(data=data , tensor_type=return_tensors )
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
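# Three BLIP heads are converted below: image captioning (conditional generation),
# visual question answering, and image-text matching / retrieval.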
def load_demo_image( image_size , device ):
    img_url = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'''
    raw_image = Image.open(requests.get(img_url , stream=True ).raw ).convert('''RGB''' )
    # CLIP-style preprocessing: bicubic resize, tensor conversion, CLIP mean/std normalization
    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ),
            transforms.ToTensor(),
            transforms.Normalize((0.48_145_466, 0.4_578_275, 0.40_821_073) , (0.26_862_954, 0.26_130_258, 0.27_577_711) ),
        ] )
    image = transform(raw_image ).unsqueeze(0 ).to(device )
    return image
def rename_key( key ):
    if "visual_encoder" in key:
        key = re.sub('''visual_encoder*''' , '''vision_model.encoder''' , key )
    if "blocks" in key:
        key = re.sub(r'''blocks''' , '''layers''' , key )
    if "attn" in key:
        key = re.sub(r'''attn''' , '''self_attn''' , key )
    if "norm1" in key:
        key = re.sub(r'''norm1''' , '''layer_norm1''' , key )
    if "norm2" in key:
        key = re.sub(r'''norm2''' , '''layer_norm2''' , key )
    if "encoder.norm" in key:
        key = re.sub(r'''encoder.norm''' , '''post_layernorm''' , key )
    if "encoder.patch_embed.proj" in key:
        key = re.sub(r'''encoder.patch_embed.proj''' , '''embeddings.patch_embedding''' , key )
    if "encoder.pos_embed" in key:
        key = re.sub(r'''encoder.pos_embed''' , '''embeddings.position_embedding''' , key )
    if "encoder.cls_token" in key:
        key = re.sub(r'''encoder.cls_token''' , '''embeddings.class_embedding''' , key )
    if "self_attn" in key:
        key = re.sub(r'''self_attn.proj''' , '''self_attn.projection''' , key )
    return key
@torch.no_grad()
def convert_blip_checkpoint( pytorch_dump_folder_path , config_path=None ):
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path )
    else:
        config = BlipConfig(projection_dim=512 , text_config={} , vision_config={} )
    hf_model = BlipForConditionalGeneration(config ).eval()
    model_url = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth'''
    pt_model = blip_decoder(pretrained=model_url , image_size=384 , vit='''base''' )
    pt_model = pt_model.eval()
    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key )
        renamed_key = rename_key(key )
        modified_state_dict[renamed_key] = value
    hf_model.load_state_dict(modified_state_dict )
    image_size = 384
    image = load_demo_image(image_size=image_size , device='''cpu''' )
    tokenizer = BertTokenizer.from_pretrained('''bert-base-uncased''' )
    input_ids = tokenizer(['''a picture of'''] ).input_ids
    out = hf_model.generate(image , input_ids )
    assert out[0].tolist() == [3_0522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
    out = hf_model.generate(image )
    assert out[0].tolist() == [3_0522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
if pytorch_dump_folder_path is not None:
hf_model.save_pretrained(snake_case_ )
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'''
    )
    vqa_model = blip_vqa(pretrained=model_url , image_size=image_size , vit='''base''' )
    vqa_model.eval()
    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key )
        renamed_key = rename_key(key )
        modified_state_dict[renamed_key] = value
    hf_vqa_model = BlipForQuestionAnswering(config )
    hf_vqa_model.load_state_dict(modified_state_dict )
    question = ['''How many dogs are in this image?''']
    question_input_ids = tokenizer(question , return_tensors='''pt''' ).input_ids
    answer = hf_vqa_model.generate(question_input_ids , image )
print(tokenizer.decode(answer[0] ) )
assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
if pytorch_dump_folder_path is not None:
hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '''_vqa''' )
    model_url = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'''
    itm_model = blip_itm(pretrained=model_url , image_size=image_size , vit='''base''' )
    itm_model.eval()
    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key )
        renamed_key = rename_key(key )
        modified_state_dict[renamed_key] = value
    hf_itm_model = BlipForImageTextRetrieval(config )
    question = ['''A picture of a woman with a dog sitting in a beach''']
    question_input_ids = tokenizer(
        question , return_tensors='''pt''' , padding='''max_length''' , truncation=True , max_length=35 , ).input_ids
    hf_itm_model.load_state_dict(modified_state_dict )
    hf_itm_model.eval()
    out_itm = hf_itm_model(question_input_ids , image , use_itm_head=True )
    out = hf_itm_model(question_input_ids , image , use_itm_head=False )
assert out[0].item() == 0.2_110_687_494_277_954
assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.45_698_845_386_505_127
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + '''_itm''' )
if __name__ == "__main__":
snake_case_ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
snake_case_ = parser.parse_args()
    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {'LayoutLMv2Config', 'LayoutLMv3Config'}
@is_pipeline_test
class lowercase ( unittest.TestCase ):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
@require_torch
def a__ ( self ) -> int:
_A : Optional[int] = pipeline(
task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""pt""" )
_A : int = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(UpperCamelCase__ ) , [{"""label""": """LABEL_0""", """score""": 0.504}] )
_A : int = text_classifier("""This is great !""" , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ ) , [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}] )
_A : Any = text_classifier(["""This is great !""", """This is bad"""] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ ) , [
[{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
[{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
] , )
_A : Optional[Any] = text_classifier("""This is great !""" , top_k=1 )
self.assertEqual(nested_simplify(UpperCamelCase__ ) , [{"""label""": """LABEL_0""", """score""": 0.504}] )
# Legacy behavior
_A : List[Any] = text_classifier("""This is great !""" , return_all_scores=UpperCamelCase__ )
self.assertEqual(nested_simplify(UpperCamelCase__ ) , [{"""label""": """LABEL_0""", """score""": 0.504}] )
_A : Union[str, Any] = text_classifier("""This is great !""" , return_all_scores=UpperCamelCase__ )
self.assertEqual(
nested_simplify(UpperCamelCase__ ) , [[{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}]] )
_A : Optional[int] = text_classifier(["""This is great !""", """Something else"""] , return_all_scores=UpperCamelCase__ )
self.assertEqual(
nested_simplify(UpperCamelCase__ ) , [
[{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
[{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
] , )
_A : List[Any] = text_classifier(["""This is great !""", """Something else"""] , return_all_scores=UpperCamelCase__ )
self.assertEqual(
nested_simplify(UpperCamelCase__ ) , [
{"""label""": """LABEL_0""", """score""": 0.504},
{"""label""": """LABEL_0""", """score""": 0.504},
] , )
@require_torch
def a__ ( self ) -> Optional[int]:
import torch
_A : Optional[Any] = pipeline(
task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""pt""" , device=torch.device("""cpu""" ) , )
_A : List[Any] = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(UpperCamelCase__ ) , [{"""label""": """LABEL_0""", """score""": 0.504}] )
@require_tf
def a__ ( self ) -> List[Any]:
_A : Any = pipeline(
task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""tf""" )
_A : List[Any] = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(UpperCamelCase__ ) , [{"""label""": """LABEL_0""", """score""": 0.504}] )
@slow
@require_torch
def a__ ( self ) -> List[str]:
_A : Optional[Any] = pipeline("""text-classification""" )
_A : Optional[int] = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(UpperCamelCase__ ) , [{"""label""": """POSITIVE""", """score""": 1.0}] )
_A : Union[str, Any] = text_classifier("""This is bad !""" )
self.assertEqual(nested_simplify(UpperCamelCase__ ) , [{"""label""": """NEGATIVE""", """score""": 1.0}] )
_A : Optional[int] = text_classifier("""Birds are a type of animal""" )
self.assertEqual(nested_simplify(UpperCamelCase__ ) , [{"""label""": """POSITIVE""", """score""": 0.988}] )
@slow
@require_tf
def a__ ( self ) -> Tuple:
_A : Any = pipeline("""text-classification""" , framework="""tf""" )
_A : Tuple = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(UpperCamelCase__ ) , [{"""label""": """POSITIVE""", """score""": 1.0}] )
_A : List[Any] = text_classifier("""This is bad !""" )
self.assertEqual(nested_simplify(UpperCamelCase__ ) , [{"""label""": """NEGATIVE""", """score""": 1.0}] )
_A : List[Any] = text_classifier("""Birds are a type of animal""" )
self.assertEqual(nested_simplify(UpperCamelCase__ ) , [{"""label""": """POSITIVE""", """score""": 0.988}] )
    def get_test_pipeline( self , model , tokenizer , processor ):
        text_classifier = TextClassificationPipeline(model=model , tokenizer=tokenizer )
        return text_classifier, ["HuggingFace is in", "This is another test"]

    def run_pipeline_test( self , text_classifier , _ ):
        model = text_classifier.model
        # Small inputs because BartTokenizer tiny has maximum position embeddings = 22
        valid_inputs = """HuggingFace is in"""
        outputs = text_classifier(valid_inputs )
        self.assertEqual(nested_simplify(outputs ) , [{"""label""": ANY(str ), """score""": ANY(float )}] )
        self.assertTrue(outputs[0]["""label"""] in model.config.id2label.values() )
        valid_inputs = ["""HuggingFace is in """, """Paris is in France"""]
        outputs = text_classifier(valid_inputs )
        self.assertEqual(
            nested_simplify(outputs ) , [{"""label""": ANY(str ), """score""": ANY(float )}, {"""label""": ANY(str ), """score""": ANY(float )}] , )
        self.assertTrue(outputs[0]["""label"""] in model.config.id2label.values() )
        self.assertTrue(outputs[1]["""label"""] in model.config.id2label.values() )
        # Forcing to get all results with `top_k=None`
        # This is NOT the legacy format
        outputs = text_classifier(valid_inputs , top_k=None )
        N = len(model.config.id2label.values() )
        self.assertEqual(
            nested_simplify(outputs ) , [[{"""label""": ANY(str ), """score""": ANY(float )}] * N, [{"""label""": ANY(str ), """score""": ANY(float )}] * N] , )
        valid_inputs = {"""text""": """HuggingFace is in """, """text_pair""": """Paris is in France"""}
        outputs = text_classifier(valid_inputs )
        self.assertEqual(
            nested_simplify(outputs ) , {"""label""": ANY(str ), """score""": ANY(float )} , )
        self.assertTrue(outputs["""label"""] in model.config.id2label.values() )
        # This might be used a text pair, but tokenizer + pipe interaction
        # makes it hard to understand that it's not using the pair properly
        # https://github.com/huggingface/transformers/issues/17305
        # We disabled this usage instead as it was outputting wrong outputs.
        invalid_input = [["""HuggingFace is in """, """Paris is in France"""]]
        with self.assertRaises(ValueError ):
            text_classifier(invalid_input )
        # This used to be valid for doing text pairs
        # We're keeping it working because of backward compatibility
        outputs = text_classifier([[["""HuggingFace is in """, """Paris is in France"""]]] )
        self.assertEqual(
            nested_simplify(outputs ) , [{"""label""": ANY(str ), """score""": ANY(float )}] , )
        self.assertTrue(outputs[0]["""label"""] in model.config.id2label.values() )
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
    TaTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
},
'tokenizer_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/tokenizer.json',
't5-base': 'https://huggingface.co/t5-base/resolve/main/tokenizer.json',
't5-large': 'https://huggingface.co/t5-large/resolve/main/tokenizer.json',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/tokenizer.json',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/tokenizer.json',
},
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
't5-small': 512,
't5-base': 512,
't5-large': 512,
't5-3b': 512,
't5-11b': 512,
}
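# <extra_id_N> sentinel tokens are registered as additional special tokens below so that
# span-corruption targets survive tokenization round-trips.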
class TaTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    slow_tokenizer_class = TaTokenizer
    prefix_tokens: List[int] = []

    def __init__( self , vocab_file=None , tokenizer_file=None , eos_token="</s>" , unk_token="<unk>" , pad_token="<pad>" , extra_ids=100 , additional_special_tokens=None , **kwargs , ):
        '''simple docstring'''
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"""<extra_id_{i}>""" for i in range(extra_ids )]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            extra_tokens = len(set(filter(lambda x : bool('''extra_id_''' in str(x ) ) , additional_special_tokens ) ) )
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"""
                    ''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'''
                    ''' tokens''' )
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , extra_ids=extra_ids , additional_special_tokens=additional_special_tokens , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        self._extra_ids = extra_ids
@staticmethod
    def _eventually_correct_t5_max_length( pretrained_model_name_or_path , max_model_length , init_max_model_length ):
'''simple docstring'''
if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
            deprecated_max_model_length = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'''This tokenizer was incorrectly instantiated with a model max length of'''
f""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"""
''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
f""" {pretrained_model_name_or_path} automatically truncating your input to"""
f""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"""
f""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"""
''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
                    ''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , FutureWarning , )
return max_model_length
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ):
        '''simple docstring'''
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
                '''tokenizer.''' )
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
            logger.info(f"""Copy vocab file to {out_vocab_file}""" )
        return (out_vocab_file,)
    def build_inputs_with_special_tokens( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ):
        '''simple docstring'''
        # T5 appends the EOS token to every sequence
        token_ids_0 = token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0
        else:
            token_ids_1 = token_ids_1 + [self.eos_token_id]
            return self.prefix_tokens + token_ids_0 + token_ids_1

    def create_token_type_ids_from_sequences( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ):
        '''simple docstring'''
        # T5 does not use token type ids, so the mask is all zeros
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos ) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos ) * [0]
    def get_sentinel_tokens( self ):
        '''simple docstring'''
        return list(
            set(filter(lambda token : bool(re.search(r'''<extra_id_\d+>''' , token ) ) is not None , self.additional_special_tokens ) ) )

    def get_sentinel_token_ids( self ):
        '''simple docstring'''
        return [self.convert_tokens_to_ids(token ) for token in self.get_sentinel_tokens()]
"""simple docstring"""
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]


class Graph:
    """simple docstring"""

    def __init__( self , vertices : set[int] , edges : Mapping[EdgeT, int] ):
        """simple docstring"""
        self.vertices = vertices
        self.edges = {
            (min(edge ), max(edge )): weight for edge, weight in edges.items()
        }

    def add_edge( self , edge : EdgeT , weight : int ):
        """simple docstring"""
        self.vertices.add(edge[0] )
        self.vertices.add(edge[1] )
        self.edges[(min(edge ), max(edge ))] = weight

    def prims_algorithm( self ):
        """simple docstring"""
        subgraph = Graph({min(self.vertices )} , {} )
        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int
        while len(subgraph.vertices ) < len(self.vertices ):
            min_weight = max(self.edges.values() ) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge , min_weight )
        return subgraph
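# Prim's algorithm grows the minimum spanning tree one vertex at a time, always taking
# the cheapest edge that crosses from the tree into the remaining vertices; the XOR test
# above admits exactly those crossing edges.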
def __lowerCamelCase ( lowerCAmelCase__ = "p107_network.txt" ):
A__ = os.path.abspath(os.path.dirname(_UpperCAmelCase ) )
A__ = os.path.join(_UpperCAmelCase ,_UpperCAmelCase )
A__ = {}
A__ = 42
A__ = 42
A__ = 42
with open(_UpperCAmelCase ) as f:
A__ = f.read().strip().split('\n' )
A__ = [line.split(',' ) for line in data]
for edgea in range(1 ,len(_UpperCAmelCase ) ):
for edgea in range(_UpperCAmelCase ):
if adjaceny_matrix[edgea][edgea] != "-":
A__ = int(adjaceny_matrix[edgea][edgea] )
A__ = Graph(set(range(len(_UpperCAmelCase ) ) ) ,_UpperCAmelCase )
A__ = graph.prims_algorithm()
A__ = sum(graph.edges.values() )
A__ = sum(subgraph.edges.values() )
return initial_total - optimal_total
if __name__ == "__main__":
print(f'''{solution() = }''')
"""simple docstring"""
import numpy as np
def sigmoid( vector : np.ndarray ):
    return 1 / (1 + np.exp(-vector ))


def sigmoid_linear_unit( vector : np.ndarray ):
    # SiLU (a.k.a. swish) activation: f(x) = x * sigmoid(x)
    return vector * sigmoid(vector )
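# Example (illustrative): sigmoid_linear_unit(np.array([-1.0, 1.0, 2.0]))
# ≈ array([-0.26894142, 0.73105858, 1.76159416])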
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class __UpperCamelCase ( SchedulerCommonTest ):
    scheduler_classes = (CMStochasticIterativeScheduler,)
    num_inference_steps = 10

    def get_scheduler_config( self , **UpperCAmelCase ):
        config = {
'num_train_timesteps': 201,
'sigma_min': 0.0_0_2,
'sigma_max': 8_0.0,
}
config.update(**UpperCAmelCase )
return config
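    # sigma_min=0.002 and sigma_max=80.0 above are the standard Karras-style noise
    # bounds used when training consistency models.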
def UpperCAmelCase__ ( self : List[str] ) -> str:
lowerCAmelCase :Optional[Any] = 10
lowerCAmelCase :Optional[int] = self.get_scheduler_config()
lowerCAmelCase :Union[str, Any] = self.scheduler_classes[0](**UpperCAmelCase )
scheduler.set_timesteps(UpperCAmelCase )
lowerCAmelCase :Optional[Any] = scheduler.timesteps[0]
lowerCAmelCase :List[Any] = scheduler.timesteps[1]
lowerCAmelCase :int = self.dummy_sample
lowerCAmelCase :Union[str, Any] = 0.1 * sample
lowerCAmelCase :List[str] = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ).prev_sample
lowerCAmelCase :List[Any] = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def UpperCAmelCase__ ( self : List[str] ) -> Union[str, Any]:
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=UpperCAmelCase )
def UpperCAmelCase__ ( self : Dict ) -> str:
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=UpperCAmelCase )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> List[str]:
lowerCAmelCase :Tuple = self.scheduler_classes[0]
lowerCAmelCase :Dict = self.get_scheduler_config()
lowerCAmelCase :Optional[int] = scheduler_class(**UpperCAmelCase )
lowerCAmelCase :str = 1
scheduler.set_timesteps(UpperCAmelCase )
lowerCAmelCase :Optional[Any] = scheduler.timesteps
lowerCAmelCase :List[str] = torch.manual_seed(0 )
lowerCAmelCase :Dict = self.dummy_model()
lowerCAmelCase :Union[str, Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(UpperCAmelCase ):
# 1. scale model input
lowerCAmelCase :Optional[int] = scheduler.scale_model_input(UpperCAmelCase , UpperCAmelCase )
# 2. predict noise residual
lowerCAmelCase :List[Any] = model(UpperCAmelCase , UpperCAmelCase )
# 3. predict previous sample x_t-1
lowerCAmelCase :str = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , generator=UpperCAmelCase ).prev_sample
lowerCAmelCase :Optional[Any] = pred_prev_sample
lowerCAmelCase :int = torch.sum(torch.abs(UpperCAmelCase ) )
lowerCAmelCase :Any = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_sum.item() - 1_9_2.7_6_1_4 ) < 1e-2
assert abs(result_mean.item() - 0.2_5_1_0 ) < 1e-3
def UpperCAmelCase__ ( self : List[str] ) -> str:
lowerCAmelCase :List[str] = self.scheduler_classes[0]
lowerCAmelCase :int = self.get_scheduler_config()
lowerCAmelCase :Union[str, Any] = scheduler_class(**UpperCAmelCase )
lowerCAmelCase :int = [106, 0]
scheduler.set_timesteps(timesteps=UpperCAmelCase )
lowerCAmelCase :List[str] = scheduler.timesteps
lowerCAmelCase :List[str] = torch.manual_seed(0 )
lowerCAmelCase :Dict = self.dummy_model()
lowerCAmelCase :Any = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
lowerCAmelCase :int = scheduler.scale_model_input(UpperCAmelCase , UpperCAmelCase )
# 2. predict noise residual
lowerCAmelCase :int = model(UpperCAmelCase , UpperCAmelCase )
# 3. predict previous sample x_t-1
lowerCAmelCase :str = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , generator=UpperCAmelCase ).prev_sample
lowerCAmelCase :Optional[Any] = pred_prev_sample
lowerCAmelCase :List[Any] = torch.sum(torch.abs(UpperCAmelCase ) )
lowerCAmelCase :int = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_sum.item() - 3_4_7.6_3_5_7 ) < 1e-2
assert abs(result_mean.item() - 0.4_5_2_7 ) < 1e-3
    def test_custom_timesteps_increasing_order( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [39, 30, 12, 15, 0]
        with self.assertRaises(ValueError , msg='`timesteps` must be in descending order.' ):
            scheduler.set_timesteps(timesteps=timesteps )

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [39, 30, 12, 1, 0]
        num_inference_steps = len(timesteps )
        with self.assertRaises(ValueError , msg='Can only pass one of `num_inference_steps` or `timesteps`.' ):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps , timesteps=timesteps )

    def test_custom_timesteps_too_large( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError , msg=f'`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}' , ):
            scheduler.set_timesteps(timesteps=timesteps )
"""simple docstring"""
from math import sqrt
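# Project Euler 86: find the least cuboid size M such that the number of cuboids
# a <= b <= c <= M with an integer shortest surface path sqrt((a + b)**2 + c**2)
# first exceeds the limit; for fixed c and s = a + b, the count of valid (a, b)
# pairs is min(c, s // 2) - max(1, s - c) + 1.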
def solution( limit : int = 1_00_00_00 ):
    '''simple docstring'''
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
                num_cuboids += (
                    min(max_cuboid_size , sum_shortest_sides // 2 )
                    - max(1 , sum_shortest_sides - max_cuboid_size )
                    + 1
                )
    return max_cuboid_size
if __name__ == "__main__":
print(F"""{solution() = }""") | 553 | 1 |
'''simple docstring'''
from __future__ import annotations
def resistor_parallel( resistors : list[float] ):
    """simple docstring"""
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"""Resistor at index {index} has a negative or zero value!"""
            raise ValueError(msg )
        first_sum += 1 / float(resistor )
        index += 1
    return 1 / first_sum


def resistor_series( resistors : list[float] ):
    """simple docstring"""
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"""Resistor at index {index} has a negative value!"""
            raise ValueError(msg )
        index += 1
    return sum_r
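# Illustrative check: resistor_parallel([3.21389, 2, 3]) ≈ 0.8738 (the reciprocal of
# the summed conductances), while resistor_series([3.21389, 2, 3]) ≈ 8.21389.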
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
lowerCamelCase = logging.get_logger(__name__)
class DeiTFeatureExtractor( DeiTImageProcessor ):
'''simple docstring'''
def __init__( self : Optional[int] , *_lowerCAmelCase : Union[str, Any] , **_lowerCAmelCase : Union[str, Any]):
'''simple docstring'''
warnings.warn(
'The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use DeiTImageProcessor instead.' , _lowerCAmelCase , )
super().__init__(*_lowerCAmelCase , **_lowerCAmelCase)
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class __magic_name__ (SchedulerCommonTest ):
'''simple docstring'''
    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config( self , **_a ):
        config = {
'''num_train_timesteps''': 11_00,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
}
config.update(**_a )
return config
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=_a )
def SCREAMING_SNAKE_CASE__ ( self:Any ):
for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=_a , beta_end=_a )
def SCREAMING_SNAKE_CASE__ ( self:Any ):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=_a )
def SCREAMING_SNAKE_CASE__ ( self:str ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_a )
def SCREAMING_SNAKE_CASE__ ( self:str ):
snake_case__ = self.scheduler_classes[0]
snake_case__ = self.get_scheduler_config()
snake_case__ = scheduler_class(**_a )
scheduler.set_timesteps(self.num_inference_steps )
snake_case__ = torch.manual_seed(0 )
snake_case__ = self.dummy_model()
snake_case__ = self.dummy_sample_deter * scheduler.init_noise_sigma
snake_case__ = sample.to(_a )
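        # standard k-diffusion style loop: scale the input by the current sigma,
        # predict the noise residual, then take a single Euler step towards the data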
for i, t in enumerate(scheduler.timesteps ):
snake_case__ = scheduler.scale_model_input(_a , _a )
snake_case__ = model(_a , _a )
snake_case__ = scheduler.step(_a , _a , _a , generator=_a )
snake_case__ = output.prev_sample
snake_case__ = torch.sum(torch.abs(_a ) )
snake_case__ = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 10.0807 ) < 1e-2
assert abs(result_mean.item() - 0.0131 ) < 1e-3
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
snake_case__ = self.scheduler_classes[0]
snake_case__ = self.get_scheduler_config(prediction_type='''v_prediction''' )
snake_case__ = scheduler_class(**_a )
scheduler.set_timesteps(self.num_inference_steps )
snake_case__ = torch.manual_seed(0 )
snake_case__ = self.dummy_model()
snake_case__ = self.dummy_sample_deter * scheduler.init_noise_sigma
snake_case__ = sample.to(_a )
for i, t in enumerate(scheduler.timesteps ):
snake_case__ = scheduler.scale_model_input(_a , _a )
snake_case__ = model(_a , _a )
snake_case__ = scheduler.step(_a , _a , _a , generator=_a )
snake_case__ = output.prev_sample
snake_case__ = torch.sum(torch.abs(_a ) )
snake_case__ = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 0.0002 ) < 1e-2
assert abs(result_mean.item() - 2.2_676e-06 ) < 1e-3
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
snake_case__ = self.scheduler_classes[0]
snake_case__ = self.get_scheduler_config()
snake_case__ = scheduler_class(**_a )
scheduler.set_timesteps(self.num_inference_steps , device=_a )
snake_case__ = torch.manual_seed(0 )
snake_case__ = self.dummy_model()
snake_case__ = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
snake_case__ = sample.to(_a )
for t in scheduler.timesteps:
snake_case__ = scheduler.scale_model_input(_a , _a )
snake_case__ = model(_a , _a )
snake_case__ = scheduler.step(_a , _a , _a , generator=_a )
snake_case__ = output.prev_sample
snake_case__ = torch.sum(torch.abs(_a ) )
snake_case__ = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 10.0807 ) < 1e-2
assert abs(result_mean.item() - 0.0131 ) < 1e-3
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
snake_case__ = self.scheduler_classes[0]
snake_case__ = self.get_scheduler_config()
snake_case__ = scheduler_class(**_a , use_karras_sigmas=_a )
scheduler.set_timesteps(self.num_inference_steps , device=_a )
snake_case__ = torch.manual_seed(0 )
snake_case__ = self.dummy_model()
snake_case__ = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
snake_case__ = sample.to(_a )
for t in scheduler.timesteps:
snake_case__ = scheduler.scale_model_input(_a , _a )
snake_case__ = model(_a , _a )
snake_case__ = scheduler.step(_a , _a , _a , generator=_a )
snake_case__ = output.prev_sample
snake_case__ = torch.sum(torch.abs(_a ) )
snake_case__ = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 124.52299499511719 ) < 1e-2
assert abs(result_mean.item() - 0.16213932633399963 ) < 1e-3
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A_ ( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = KandinskyVaaControlnetPipeline
    params = ['''image_embeds''', '''negative_image_embeds''', '''hint''']
    batch_params = ['''image_embeds''', '''negative_image_embeds''', '''hint''']
    required_optional_params = [
'''generator''',
'''height''',
'''width''',
'''latents''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
    test_xformers_attention = False
@property
    def text_embedder_hidden_size( self ):
return 3_2
@property
    def time_input_dim( self ):
return 3_2
@property
    def block_out_channels_a( self ):
return self.time_input_dim
@property
    def time_embed_dim( self ):
return self.time_input_dim * 4
@property
    def cross_attention_dim( self ):
return 1_0_0
@property
    def dummy_unet( self ):
torch.manual_seed(0)
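        # tiny UNet config: the "image_hint" addition embedding conditions generation
        # on both the image embedding and the ControlNet-style hint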
__lowerCamelCase : Optional[Any] = {
'in_channels': 8,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'image_hint',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
__lowerCamelCase : Union[str, Any] = UNetaDConditionModel(**SCREAMING_SNAKE_CASE__)
return model
@property
    def dummy_movq_kwargs( self ):
return {
"block_out_channels": [3_2, 3_2, 6_4, 6_4],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
    def dummy_movq( self ):
torch.manual_seed(0)
__lowerCamelCase : int = VQModel(**self.dummy_movq_kwargs)
return model
    def get_dummy_components( self ):
__lowerCamelCase : Tuple = self.dummy_unet
__lowerCamelCase : List[Any] = self.dummy_movq
__lowerCamelCase : str = DDIMScheduler(
num_train_timesteps=1_0_0_0 ,beta_schedule='linear' ,beta_start=0.00085 ,beta_end=0.012 ,clip_sample=SCREAMING_SNAKE_CASE__ ,set_alpha_to_one=SCREAMING_SNAKE_CASE__ ,steps_offset=1 ,prediction_type='epsilon' ,thresholding=SCREAMING_SNAKE_CASE__ ,)
__lowerCamelCase : Dict = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
    def get_dummy_inputs( self ,device ,seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(seed )).to(device )
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(seed + 1)).to(
            device )
        # create hint
        hint = floats_tensor((1, 3, 6_4, 6_4) ,rng=random.Random(seed )).to(device )
        if str(device ).startswith('mps'):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
__lowerCamelCase : List[str] = {
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'hint': hint,
'generator': generator,
'height': 6_4,
'width': 6_4,
'guidance_scale': 4.0,
'num_inference_steps': 2,
'output_type': 'np',
}
return inputs
    def test_kandinsky_controlnet( self ):
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device ) ,return_dict=False ,)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 6_4, 6_4, 3)
        expected_slice = np.array(
            [0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595])
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
), F" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class A_ ( unittest.TestCase ):
    def tearDown( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_controlnet( self ):
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy')
        hint = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinskyv22/hint_image_cat.png')
        hint = torch.from_numpy(np.array(hint )).float() / 255.0
        hint = hint.permute(2 ,0 ,1).unsqueeze(0)
        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-prior' ,torch_dtype=torch.float16)
        pipe_prior.to(torch_device )
        pipeline = KandinskyVaaControlnetPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-controlnet-depth' ,torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        prompt = 'A robot, 4k photo'
        generator = torch.Generator(device='cuda').manual_seed(0)
        image_emb , zero_image_emb = pipe_prior(
            prompt ,generator=generator ,num_inference_steps=5 ,negative_prompt='' ,).to_tuple()
        generator = torch.Generator(device='cuda').manual_seed(0)
        output = pipeline(
            image_embeds=image_emb ,negative_image_embeds=zero_image_emb ,hint=hint ,generator=generator ,num_inference_steps=1_0_0 ,output_type='np' ,)
        image = output.images[0]
        assert image.shape == (5_1_2, 5_1_2, 3)
        assert_mean_pixel_difference(image ,expected_image )
'''simple docstring'''
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING
    def tearDown( self ):
        '''simple docstring'''
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        if is_torch_available():
            import torch

            torch.cuda.empty_cache()
@require_tf
def snake_case__ ( self : Any ) ->Tuple:
'''simple docstring'''
_UpperCamelCase : Tuple = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="tf" )
_UpperCamelCase : List[Any] = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(lowercase__ , decimals=6 ) , [
{"sequence": "My name is grouped", "score": 2.1e-05, "token": 38_015, "token_str": " grouped"},
{"sequence": "My name is accuser", "score": 2.1e-05, "token": 25_506, "token_str": " accuser"},
] , )
_UpperCamelCase : Dict = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(lowercase__ , decimals=6 ) , [
{
"sequence": "The largest city in France is grouped",
"score": 2.1e-05,
"token": 38_015,
"token_str": " grouped",
},
{
"sequence": "The largest city in France is accuser",
"score": 2.1e-05,
"token": 25_506,
"token_str": " accuser",
},
] , )
_UpperCamelCase : Union[str, Any] = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(lowercase__ , decimals=6 ) , [
{"sequence": "My name is Clara", "score": 2e-05, "token": 13_606, "token_str": " Clara"},
{"sequence": "My name is Patrick", "score": 2e-05, "token": 3_499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 1.9e-05, "token": 2_941, "token_str": " Te"},
] , )
@require_torch
def snake_case__ ( self : int ) ->Optional[Any]:
'''simple docstring'''
_UpperCamelCase : List[Any] = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="pt" )
_UpperCamelCase : List[str] = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(lowercase__ , decimals=6 ) , [
{"sequence": "My name is Maul", "score": 2.2e-05, "token": 35_676, "token_str": " Maul"},
{"sequence": "My name isELS", "score": 2.2e-05, "token": 16_416, "token_str": "ELS"},
] , )
_UpperCamelCase : Dict = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(lowercase__ , decimals=6 ) , [
{
"sequence": "The largest city in France is Maul",
"score": 2.2e-05,
"token": 35_676,
"token_str": " Maul",
},
{"sequence": "The largest city in France isELS", "score": 2.2e-05, "token": 16_416, "token_str": "ELS"},
] , )
_UpperCamelCase : Optional[Any] = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(lowercase__ , decimals=6 ) , [
{"sequence": "My name is Patrick", "score": 2.1e-05, "token": 3_499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 2e-05, "token": 2_941, "token_str": " Te"},
{"sequence": "My name is Clara", "score": 2e-05, "token": 13_606, "token_str": " Clara"},
] , )
_UpperCamelCase : Optional[int] = unmasker("My name is <mask> <mask>" , top_k=2 )
self.assertEqual(
nested_simplify(lowercase__ , decimals=6 ) , [
[
{
"score": 2.2e-05,
"token": 35_676,
"token_str": " Maul",
"sequence": "<s>My name is Maul<mask></s>",
},
{"score": 2.2e-05, "token": 16_416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
],
[
{
"score": 2.2e-05,
"token": 35_676,
"token_str": " Maul",
"sequence": "<s>My name is<mask> Maul</s>",
},
{"score": 2.2e-05, "token": 16_416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
],
] , )
@require_torch_gpu
def snake_case__ ( self : List[Any] ) ->Tuple:
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = pipeline("fill-mask" , model="hf-internal-testing/tiny-random-distilbert" , device=0 , framework="pt" )
# convert model to fp16
pipe.model.half()
_UpperCamelCase : int = pipe("Paris is the [MASK] of France." )
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor got casted back to float32
# for postprocessing.
self.assertIsInstance(lowercase__ , lowercase__ )
@slow
@require_torch
def snake_case__ ( self : str ) ->List[Any]:
'''simple docstring'''
_UpperCamelCase : List[Any] = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="pt" )
self.run_large_test(lowercase__ )
@slow
@require_tf
def snake_case__ ( self : List[str] ) ->Dict:
'''simple docstring'''
_UpperCamelCase : List[str] = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="tf" )
self.run_large_test(lowercase__ )
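    # run_large_test exercises a real checkpoint (distilroberta-base) under both
    # frameworks, so the scores asserted there are actual model outputs.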
    def run_large_test( self , unmasker ):
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(lowercase__ ) , [
{"sequence": "My name is John", "score": 0.0_0_8, "token": 610, "token_str": " John"},
{"sequence": "My name is Chris", "score": 0.0_0_7, "token": 1_573, "token_str": " Chris"},
] , )
_UpperCamelCase : int = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(lowercase__ ) , [
{
"sequence": "The largest city in France is Paris",
"score": 0.2_5_1,
"token": 2_201,
"token_str": " Paris",
},
{
"sequence": "The largest city in France is Lyon",
"score": 0.2_1_4,
"token": 12_790,
"token_str": " Lyon",
},
] , )
_UpperCamelCase : List[str] = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(lowercase__ ) , [
{"sequence": "My name is Patrick", "score": 0.0_0_5, "token": 3_499, "token_str": " Patrick"},
{"sequence": "My name is Clara", "score": 0.0_0_0, "token": 13_606, "token_str": " Clara"},
{"sequence": "My name is Te", "score": 0.0_0_0, "token": 2_941, "token_str": " Te"},
] , )
@require_torch
def snake_case__ ( self : Any ) ->Dict:
'''simple docstring'''
_UpperCamelCase : List[Any] = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="pt" )
_UpperCamelCase : Optional[int] = None
_UpperCamelCase : List[str] = None
self.run_pipeline_test(lowercase__ , [] )
@require_tf
def snake_case__ ( self : List[str] ) ->Dict:
'''simple docstring'''
_UpperCamelCase : Any = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="tf" )
_UpperCamelCase : str = None
_UpperCamelCase : Union[str, Any] = None
self.run_pipeline_test(lowercase__ , [] )
    def get_test_pipeline( self , model , tokenizer , processor ):
        '''simple docstring'''
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)" )
        fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer )
        examples = [
            f'''This is another {tokenizer.mask_token} test''',
        ]
        return fill_masker, examples
    def run_pipeline_test( self , fill_masker , examples ):
        '''simple docstring'''
        tokenizer = fill_masker.tokenizer
        model = fill_masker.model
_UpperCamelCase : Any = fill_masker(
f'''This is a {tokenizer.mask_token}''' , )
self.assertEqual(
lowercase__ , [
{"sequence": ANY(lowercase__ ), "score": ANY(lowercase__ ), "token": ANY(lowercase__ ), "token_str": ANY(lowercase__ )},
{"sequence": ANY(lowercase__ ), "score": ANY(lowercase__ ), "token": ANY(lowercase__ ), "token_str": ANY(lowercase__ )},
{"sequence": ANY(lowercase__ ), "score": ANY(lowercase__ ), "token": ANY(lowercase__ ), "token_str": ANY(lowercase__ )},
{"sequence": ANY(lowercase__ ), "score": ANY(lowercase__ ), "token": ANY(lowercase__ ), "token_str": ANY(lowercase__ )},
{"sequence": ANY(lowercase__ ), "score": ANY(lowercase__ ), "token": ANY(lowercase__ ), "token_str": ANY(lowercase__ )},
] , )
_UpperCamelCase : Any = fill_masker([f'''This is a {tokenizer.mask_token}'''] )
self.assertEqual(
lowercase__ , [
{"sequence": ANY(lowercase__ ), "score": ANY(lowercase__ ), "token": ANY(lowercase__ ), "token_str": ANY(lowercase__ )},
{"sequence": ANY(lowercase__ ), "score": ANY(lowercase__ ), "token": ANY(lowercase__ ), "token_str": ANY(lowercase__ )},
{"sequence": ANY(lowercase__ ), "score": ANY(lowercase__ ), "token": ANY(lowercase__ ), "token_str": ANY(lowercase__ )},
{"sequence": ANY(lowercase__ ), "score": ANY(lowercase__ ), "token": ANY(lowercase__ ), "token_str": ANY(lowercase__ )},
{"sequence": ANY(lowercase__ ), "score": ANY(lowercase__ ), "token": ANY(lowercase__ ), "token_str": ANY(lowercase__ )},
] , )
_UpperCamelCase : Optional[int] = fill_masker([f'''This is a {tokenizer.mask_token}''', f'''Another {tokenizer.mask_token} great test.'''] )
self.assertEqual(
lowercase__ , [
[
{"sequence": ANY(lowercase__ ), "score": ANY(lowercase__ ), "token": ANY(lowercase__ ), "token_str": ANY(lowercase__ )},
{"sequence": ANY(lowercase__ ), "score": ANY(lowercase__ ), "token": ANY(lowercase__ ), "token_str": ANY(lowercase__ )},
{"sequence": ANY(lowercase__ ), "score": ANY(lowercase__ ), "token": ANY(lowercase__ ), "token_str": ANY(lowercase__ )},
{"sequence": ANY(lowercase__ ), "score": ANY(lowercase__ ), "token": ANY(lowercase__ ), "token_str": ANY(lowercase__ )},
{"sequence": ANY(lowercase__ ), "score": ANY(lowercase__ ), "token": ANY(lowercase__ ), "token_str": ANY(lowercase__ )},
],
[
{"sequence": ANY(lowercase__ ), "score": ANY(lowercase__ ), "token": ANY(lowercase__ ), "token_str": ANY(lowercase__ )},
{"sequence": ANY(lowercase__ ), "score": ANY(lowercase__ ), "token": ANY(lowercase__ ), "token_str": ANY(lowercase__ )},
{"sequence": ANY(lowercase__ ), "score": ANY(lowercase__ ), "token": ANY(lowercase__ ), "token_str": ANY(lowercase__ )},
{"sequence": ANY(lowercase__ ), "score": ANY(lowercase__ ), "token": ANY(lowercase__ ), "token_str": ANY(lowercase__ )},
{"sequence": ANY(lowercase__ ), "score": ANY(lowercase__ ), "token": ANY(lowercase__ ), "token_str": ANY(lowercase__ )},
],
] , )
with self.assertRaises(lowercase__ ):
fill_masker([None] )
        # Input with no mask token is not supported
with self.assertRaises(lowercase__ ):
fill_masker("This is" )
self.run_test_top_k(lowercase__ , lowercase__ )
self.run_test_targets(lowercase__ , lowercase__ )
self.run_test_top_k_targets(lowercase__ , lowercase__ )
self.fill_mask_with_duplicate_targets_and_top_k(lowercase__ , lowercase__ )
self.fill_mask_with_multiple_masks(lowercase__ , lowercase__ )
def snake_case__ ( self : Union[str, Any] , lowercase__ : Optional[int] , lowercase__ : Optional[Any] ) ->str:
'''simple docstring'''
_UpperCamelCase : Any = tokenizer.get_vocab()
_UpperCamelCase : List[Any] = sorted(vocab.keys() )[:2]
# Pipeline argument
_UpperCamelCase : Dict = FillMaskPipeline(model=lowercase__ , tokenizer=lowercase__ , targets=lowercase__ )
_UpperCamelCase : Dict = fill_masker(f'''This is a {tokenizer.mask_token}''' )
self.assertEqual(
lowercase__ , [
{"sequence": ANY(lowercase__ ), "score": ANY(lowercase__ ), "token": ANY(lowercase__ ), "token_str": ANY(lowercase__ )},
{"sequence": ANY(lowercase__ ), "score": ANY(lowercase__ ), "token": ANY(lowercase__ ), "token_str": ANY(lowercase__ )},
] , )
_UpperCamelCase : Optional[int] = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , lowercase__ )
_UpperCamelCase : List[Any] = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(lowercase__ ) )
# Call argument
_UpperCamelCase : Optional[int] = FillMaskPipeline(model=lowercase__ , tokenizer=lowercase__ )
_UpperCamelCase : int = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=lowercase__ )
self.assertEqual(
lowercase__ , [
{"sequence": ANY(lowercase__ ), "score": ANY(lowercase__ ), "token": ANY(lowercase__ ), "token_str": ANY(lowercase__ )},
{"sequence": ANY(lowercase__ ), "score": ANY(lowercase__ ), "token": ANY(lowercase__ ), "token_str": ANY(lowercase__ )},
] , )
_UpperCamelCase : Union[str, Any] = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , lowercase__ )
_UpperCamelCase : Optional[Any] = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(lowercase__ ) )
# Score equivalence
_UpperCamelCase : Optional[Any] = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=lowercase__ )
_UpperCamelCase : List[str] = [top_mask["token_str"] for top_mask in outputs]
_UpperCamelCase : Optional[int] = [top_mask["score"] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(lowercase__ ) == set(lowercase__ ):
_UpperCamelCase : List[Any] = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=lowercase__ )
_UpperCamelCase : Union[str, Any] = [top_mask["score"] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(lowercase__ ) , nested_simplify(lowercase__ ) )
# Raises with invalid
with self.assertRaises(lowercase__ ):
_UpperCamelCase : List[str] = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=[] )
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't be raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(lowercase__ ):
_UpperCamelCase : List[Any] = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=[""] )
with self.assertRaises(lowercase__ ):
_UpperCamelCase : str = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets="" )
def snake_case__ ( self : Union[str, Any] , lowercase__ : Union[str, Any] , lowercase__ : Union[str, Any] ) ->Any:
'''simple docstring'''
_UpperCamelCase : List[str] = FillMaskPipeline(model=lowercase__ , tokenizer=lowercase__ , top_k=2 )
_UpperCamelCase : int = fill_masker(f'''This is a {tokenizer.mask_token}''' )
self.assertEqual(
lowercase__ , [
{"sequence": ANY(lowercase__ ), "score": ANY(lowercase__ ), "token": ANY(lowercase__ ), "token_str": ANY(lowercase__ )},
{"sequence": ANY(lowercase__ ), "score": ANY(lowercase__ ), "token": ANY(lowercase__ ), "token_str": ANY(lowercase__ )},
] , )
_UpperCamelCase : Optional[int] = FillMaskPipeline(model=lowercase__ , tokenizer=lowercase__ )
_UpperCamelCase : Optional[int] = fill_masker(f'''This is a {tokenizer.mask_token}''' , top_k=2 )
self.assertEqual(
lowercase__ , [
{"sequence": ANY(lowercase__ ), "score": ANY(lowercase__ ), "token": ANY(lowercase__ ), "token_str": ANY(lowercase__ )},
{"sequence": ANY(lowercase__ ), "score": ANY(lowercase__ ), "token": ANY(lowercase__ ), "token_str": ANY(lowercase__ )},
] , )
self.assertEqual(nested_simplify(lowercase__ ) , nested_simplify(lowercase__ ) )
def snake_case__ ( self : int , lowercase__ : Tuple , lowercase__ : Tuple ) ->Dict:
'''simple docstring'''
_UpperCamelCase : List[str] = tokenizer.get_vocab()
_UpperCamelCase : Optional[int] = FillMaskPipeline(model=lowercase__ , tokenizer=lowercase__ )
# top_k=2, ntargets=3
_UpperCamelCase : List[str] = sorted(vocab.keys() )[:3]
_UpperCamelCase : int = fill_masker(f'''This is a {tokenizer.mask_token}''' , top_k=2 , targets=lowercase__ )
        # If we use the most probable targets, and filter differently, we should still
        # have the same results
        _UpperCamelCase : Any = [el["token_str"] for el in sorted(lowercase__ , key=lambda x : x["score"] , reverse=True )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(lowercase__ ).issubset(lowercase__ ):
_UpperCamelCase : Optional[int] = fill_masker(f'''This is a {tokenizer.mask_token}''' , top_k=3 , targets=lowercase__ )
# They should yield exactly the same result
self.assertEqual(nested_simplify(lowercase__ ) , nested_simplify(lowercase__ ) )
def snake_case__ ( self : Any , lowercase__ : Dict , lowercase__ : Union[str, Any] ) ->Optional[int]:
'''simple docstring'''
_UpperCamelCase : Optional[int] = FillMaskPipeline(model=lowercase__ , tokenizer=lowercase__ )
_UpperCamelCase : Union[str, Any] = tokenizer.get_vocab()
# String duplicates + id duplicates
_UpperCamelCase : str = sorted(vocab.keys() )[:3]
_UpperCamelCase : Optional[int] = [targets[0], targets[1], targets[0], targets[2], targets[1]]
_UpperCamelCase : Union[str, Any] = fill_masker(f'''My name is {tokenizer.mask_token}''' , targets=lowercase__ , top_k=10 )
        # The target list contains duplicates, so we can't output more
        # results than the number of unique targets
self.assertEqual(len(lowercase__ ) , 3 )
def snake_case__ ( self : str , lowercase__ : Optional[Any] , lowercase__ : Optional[Any] ) ->str:
'''simple docstring'''
_UpperCamelCase : Optional[int] = FillMaskPipeline(model=lowercase__ , tokenizer=lowercase__ )
_UpperCamelCase : List[str] = fill_masker(
f'''This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}''' , top_k=2 )
self.assertEqual(
lowercase__ , [
[
{"sequence": ANY(lowercase__ ), "score": ANY(lowercase__ ), "token": ANY(lowercase__ ), "token_str": ANY(lowercase__ )},
{"sequence": ANY(lowercase__ ), "score": ANY(lowercase__ ), "token": ANY(lowercase__ ), "token_str": ANY(lowercase__ )},
],
[
{"sequence": ANY(lowercase__ ), "score": ANY(lowercase__ ), "token": ANY(lowercase__ ), "token_str": ANY(lowercase__ )},
{"sequence": ANY(lowercase__ ), "score": ANY(lowercase__ ), "token": ANY(lowercase__ ), "token_str": ANY(lowercase__ )},
],
[
{"sequence": ANY(lowercase__ ), "score": ANY(lowercase__ ), "token": ANY(lowercase__ ), "token_str": ANY(lowercase__ )},
{"sequence": ANY(lowercase__ ), "score": ANY(lowercase__ ), "token": ANY(lowercase__ ), "token_str": ANY(lowercase__ )},
],
] , )
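The tests above exercise FillMaskPipeline end to end. As a quick orientation, a minimal sketch of the same API used directly (model download required; scores vary by checkpoint):

from transformers import pipeline

unmasker = pipeline("fill-mask", model="sshleifer/tiny-distilroberta-base")
# each call returns a list of {sequence, score, token, token_str} dicts
print(unmasker("This is a <mask> test"))
# candidate restriction, as in the targets tests above; the leading space
# matters because RoBERTa's BPE vocabulary stores word-initial tokens that way
print(unmasker("My name is <mask>", targets=[" Patrick", " Clara"], top_k=2))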
| 204 | '''simple docstring'''
def is_balanced(s: str) -> bool:
    '''simple docstring'''
    stack = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False

    return len(stack) == 0


def main() -> None:
    '''simple docstring'''
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")


if __name__ == "__main__":
    main()
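Quick checks for the stack-based matcher above (expected outputs in comments):

print(is_balanced("([]{})"))  # True: every closer matches the most recent opener
print(is_balanced("([)]"))    # False: interleaved pairs fail on the popped opener
print(is_balanced("((("))     # False: leftover openers leave a non-empty stack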
| 204 | 1 |
import numpy as np
from PIL import Image


def maxpooling(arr, size, stride):
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError('The input array is not a square matrix')
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))

    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1

        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr


def avgpooling(arr, size, stride):
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError('The input array is not a square matrix')
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))

    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1

        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr


# Main Function
if __name__ == "__main__":
    from doctest import testmod

    testmod(name='avgpooling', verbose=True)

    # Loading the image
    image = Image.open('path_to_image')

    # Converting the image to numpy array and maxpooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()

    # Converting the image to numpy array and averagepooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 39 |
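A small numeric check of the two pooling routines above on a 4x4 input with size=2 and stride=2 (expected outputs in comments):

mat = [[1, 2, 3, 4],
       [5, 6, 7, 8],
       [9, 10, 11, 12],
       [13, 14, 15, 16]]
print(maxpooling(mat, size=2, stride=2))  # [[ 6.  8.] [14. 16.]]
print(avgpooling(mat, size=2, stride=2))  # [[ 3.  5.] [11. 13.]] (averages truncated by int())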
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
"xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/config.json",
"xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/config.json",
"xlm-roberta-large-finetuned-conll02-dutch": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll02-spanish": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll03-english": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll03-german": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"
),
}
class _UpperCAmelCase ( A__ ):
UpperCamelCase__ = '''xlm-roberta'''
def __init__( self , a__=3_0_5_2_2 , a__=7_6_8 , a__=1_2 , a__=1_2 , a__=3_0_7_2 , a__="gelu" , a__=0.1 , a__=0.1 , a__=5_1_2 , a__=2 , a__=0.0_2 , a__=1e-12 , a__=1 , a__=0 , a__=2 , a__="absolute" , a__=True , a__=None , **a__ , ):
super().__init__(pad_token_id=a__ , bos_token_id=a__ , eos_token_id=a__ , **a__)
A__ = vocab_size
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = hidden_act
A__ = intermediate_size
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = type_vocab_size
A__ = initializer_range
A__ = layer_norm_eps
A__ = position_embedding_type
A__ = use_cache
A__ = classifier_dropout
class _UpperCAmelCase ( A__ ):
@property
def snake_case_ ( self):
if self.task == "multiple-choice":
A__ = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
A__ = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
])
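The config mirrors RoBERTa's hyperparameters. A minimal sketch of instantiating a small randomly initialized model from it with the public transformers classes (no download needed):

from transformers import XLMRobertaConfig, XLMRobertaModel

config = XLMRobertaConfig(num_hidden_layers=2, hidden_size=128, num_attention_heads=2)
model = XLMRobertaModel(config)  # random weights, handy for smoke tests
print(model.config.vocab_size)   # 30522, the default in the signature above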
| 632 | 0 |
"""simple docstring"""
import pprint
import requests
lowercase__ : Tuple = '''https://zenquotes.io/api'''
def __lowercase ( ):
return requests.get(API_ENDPOINT_URL + '''/today''' ).json()
def __lowercase ( ):
return requests.get(API_ENDPOINT_URL + '''/random''' ).json()
if __name__ == "__main__":
lowercase__ : int = random_quotes()
pprint.pprint(response)
| 485 |
"""simple docstring"""
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def __lowercase ( _a ):
return np.dot(_a , _a )
class _UpperCAmelCase :
def __init__( self : int , *,
lowercase_ : float = np.inf , lowercase_ : str = "linear" , lowercase_ : float = 0.0 , ):
snake_case_ : Optional[Any] = regularization
snake_case_ : Tuple = gamma
if kernel == "linear":
snake_case_ : int = self.__linear
elif kernel == "rbf":
if self.gamma == 0:
raise ValueError('''rbf kernel requires gamma''' )
if not isinstance(self.gamma , (float, int) ):
raise ValueError('''gamma must be float or int''' )
if not self.gamma > 0:
raise ValueError('''gamma must be > 0''' )
snake_case_ : int = self.__rbf
# in the future, there could be a default value like in sklearn
# sklear: def_gamma = 1/(n_features * X.var()) (wiki)
# previously it was 1/(n_features)
else:
            msg = f"Unknown kernel: {kernel}"
            raise ValueError(msg)
def _snake_case ( self : int , lowercase_ : ndarray , lowercase_ : ndarray ):
return np.dot(lowercase_ , lowercase_ )
def _snake_case ( self : int , lowercase_ : ndarray , lowercase_ : ndarray ):
return np.exp(-(self.gamma * norm_squared(vectora - vectora )) )
def _snake_case ( self : Any , lowercase_ : list[ndarray] , lowercase_ : ndarray ):
snake_case_ : Union[str, Any] = observations
snake_case_ : int = classes
# using Wolfe's Dual to calculate w.
# Primal problem: minimize 1/2*norm_squared(w)
# constraint: yn(w . xn + b) >= 1
#
# With l a vector
# Dual problem: maximize sum_n(ln) -
# 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
# constraint: self.C >= ln >= 0
# and sum_n(ln*yn) = 0
# Then we get w using w = sum_n(ln*yn*xn)
# At the end we can get b ~= mean(yn - w . xn)
#
# Since we use kernels, we only need l_star to calculate b
# and to classify observations
((snake_case_), ) : List[str] = np.shape(lowercase_ )
def to_minimize(lowercase_ : ndarray ) -> float:
snake_case_ : Tuple = 0
((snake_case_), ) : Optional[Any] = np.shape(lowercase_ )
for i in range(lowercase_ ):
for j in range(lowercase_ ):
s += (
candidate[i]
* candidate[j]
* classes[i]
* classes[j]
* self.kernel(observations[i] , observations[j] )
)
return 1 / 2 * s - sum(lowercase_ )
snake_case_ : Optional[Any] = LinearConstraint(lowercase_ , 0 , 0 )
snake_case_ : str = Bounds(0 , self.regularization )
snake_case_ : int = minimize(
lowercase_ , np.ones(lowercase_ ) , bounds=lowercase_ , constraints=[ly_contraint] ).x
snake_case_ : Optional[Any] = l_star
# calculating mean offset of separation plane to points
snake_case_ : List[Any] = 0
for i in range(lowercase_ ):
for j in range(lowercase_ ):
s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
observations[i] , observations[j] )
snake_case_ : Union[str, Any] = s / n
def _snake_case ( self : List[str] , lowercase_ : ndarray ):
snake_case_ : int = sum(
self.optimum[n]
* self.classes[n]
* self.kernel(self.observations[n] , lowercase_ )
for n in range(len(self.classes ) ) )
return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod()
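The comments above describe solving Wolfe's dual with scipy. A self-contained sketch of that exact recipe on a toy separable set, independent of the (obfuscated) class interface:

import numpy as np
from scipy.optimize import Bounds, LinearConstraint, minimize

x = np.array([[1.0, 1.0], [2.0, 2.0], [-1.0, -1.0], [-2.0, -2.0]])
y = np.array([1.0, 1.0, -1.0, -1.0])
gram = x @ x.T  # linear kernel matrix K(x_n, x_m)

def dual_objective(l):
    # 1/2 * sum_nm l_n*l_m*y_n*y_m*K(x_n, x_m) - sum_n l_n, to be minimized
    return 0.5 * (l * y) @ gram @ (l * y) - l.sum()

res = minimize(
    dual_objective,
    np.ones(len(y)),
    bounds=Bounds(0, 10.0),                   # 0 <= l_n <= C
    constraints=[LinearConstraint(y, 0, 0)],  # sum_n l_n * y_n = 0
)
w = (res.x * y) @ x                           # w = sum_n l_n * y_n * x_n
support = res.x > 1e-6
b = np.mean(y[support] - x[support] @ w)      # b ~= mean(y_n - w . x_n)
print(np.sign(x @ w + b))                     # [ 1.  1. -1. -1.]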
| 485 | 1 |
def solution(limit=100_0000):
    phi = [i - 1 for i in range(limit + 1)]

    for i in range(2, limit + 1):
        if phi[i] == i - 1:
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i

    return sum(phi[2 : limit + 1])


if __name__ == "__main__":
    print(solution())
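Sanity check on a small limit: the sieve's phi values for 2..8 are 1, 2, 2, 4, 2, 6, 4, so the count of reduced proper fractions with denominator at most 8 is 21.

print(solution(limit=8))  # 21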
| 559 | def lowerCAmelCase( __lowerCamelCase , __lowerCamelCase ):
_validate_point(__lowerCamelCase )
_validate_point(__lowerCamelCase )
if len(__lowerCamelCase ) != len(__lowerCamelCase ):
raise ValueError('Both points must be in the same n-dimensional space' )
return float(sum(abs(a - b ) for a, b in zip(__lowerCamelCase , __lowerCamelCase ) ) )
def lowerCAmelCase( __lowerCamelCase ):
if point:
if isinstance(__lowerCamelCase , __lowerCamelCase ):
for item in point:
if not isinstance(__lowerCamelCase , (int, float) ):
__a = (
'Expected a list of numbers as input, found '
f'''{type(__lowerCamelCase ).__name__}'''
)
raise TypeError(__lowerCamelCase )
else:
__a = f'''Expected a list of numbers as input, found {type(__lowerCamelCase ).__name__}'''
raise TypeError(__lowerCamelCase )
else:
raise ValueError('Missing an input' )
def lowerCAmelCase( __lowerCamelCase , __lowerCamelCase ):
_validate_point(__lowerCamelCase )
_validate_point(__lowerCamelCase )
if len(__lowerCamelCase ) != len(__lowerCamelCase ):
raise ValueError('Both points must be in the same n-dimensional space' )
return float(sum(abs(x - y ) for x, y in zip(__lowerCamelCase , __lowerCamelCase ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
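Example calls for the two equivalent implementations above:

print(manhattan_distance([1, 1], [2, 2]))                 # 2.0
print(manhattan_distance_one_liner([1.5, 2], [3, 2.5]))   # 2.0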
| 559 | 1 |
'''simple docstring'''
from __future__ import annotations
class Node:
    def __init__(self, data) -> None:
        """simple docstring"""
        self.data = data
        self.left = None
        self.right = None


def display(tree) -> None:  # In Order traversal of the tree
    if tree:
        display(tree.left)
        print(tree.data)
        display(tree.right)


def depth_of_tree(tree) -> int:
    return 1 + max(depth_of_tree(tree.left), depth_of_tree(tree.right)) if tree else 0


def is_full_binary_tree(tree) -> bool:
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left) and is_full_binary_tree(tree.right)
    else:
        return not tree.left and not tree.right


def main() -> None:  # Main function for testing.
    # The attachment points below are a reconstruction: the original
    # assignment targets were lost to identifier obfuscation.
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    tree.left.right.left = Node(6)
    tree.right.left = Node(7)
    tree.right.left.left = Node(8)
    tree.right.left.left.right = Node(9)

    print(is_full_binary_tree(tree))
    print(depth_of_tree(tree))
    print("""Tree is: """)
    display(tree)


if __name__ == "__main__":
    main()
| 709 |
'''simple docstring'''
UNIVERSAL_GAS_CONSTANT = 8.314_462  # Unit - J mol-1 K-1


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("""Invalid inputs. Enter positive value.""")

    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("""Invalid inputs. Enter positive value.""")

    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure


if __name__ == "__main__":
    from doctest import testmod

    testmod()
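Worked example: one mole at standard temperature in a 22.4-litre volume sits near one atmosphere, and the inverse query recovers the volume.

print(pressure_of_gas_system(1, 273.15, 0.0224))  # ~101387 Pa (about 1 atm)
print(volume_of_gas_system(1, 273.15, 101325))    # ~0.0224 m^3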
| 35 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase_ = {
"""configuration_roberta""": ["""ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RobertaConfig""", """RobertaOnnxConfig"""],
"""tokenization_roberta""": ["""RobertaTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ["""RobertaTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
"""ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaForCausalLM""",
"""RobertaForMaskedLM""",
"""RobertaForMultipleChoice""",
"""RobertaForQuestionAnswering""",
"""RobertaForSequenceClassification""",
"""RobertaForTokenClassification""",
"""RobertaModel""",
"""RobertaPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
"""TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaForCausalLM""",
"""TFRobertaForMaskedLM""",
"""TFRobertaForMultipleChoice""",
"""TFRobertaForQuestionAnswering""",
"""TFRobertaForSequenceClassification""",
"""TFRobertaForTokenClassification""",
"""TFRobertaMainLayer""",
"""TFRobertaModel""",
"""TFRobertaPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
"""FlaxRobertaForCausalLM""",
"""FlaxRobertaForMaskedLM""",
"""FlaxRobertaForMultipleChoice""",
"""FlaxRobertaForQuestionAnswering""",
"""FlaxRobertaForSequenceClassification""",
"""FlaxRobertaForTokenClassification""",
"""FlaxRobertaModel""",
"""FlaxRobertaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 2 | """simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCamelCase :
def __init__( self ,__UpperCamelCase ,__UpperCamelCase=13 ,__UpperCamelCase=32 ,__UpperCamelCase=3 ,__UpperCamelCase=4 ,__UpperCamelCase=[10, 20, 30, 40] ,__UpperCamelCase=[2, 2, 3, 2] ,__UpperCamelCase=True ,__UpperCamelCase=True ,__UpperCamelCase=37 ,__UpperCamelCase="gelu" ,__UpperCamelCase=10 ,__UpperCamelCase=0.02 ,__UpperCamelCase=["stage2", "stage3", "stage4"] ,__UpperCamelCase=3 ,__UpperCamelCase=None ,) -> Tuple:
'''simple docstring'''
lowercase_ : List[str] = parent
lowercase_ : List[str] = batch_size
lowercase_ : Optional[Any] = image_size
lowercase_ : Any = num_channels
lowercase_ : Optional[int] = num_stages
lowercase_ : Dict = hidden_sizes
lowercase_ : int = depths
lowercase_ : Optional[Any] = is_training
lowercase_ : Tuple = use_labels
lowercase_ : int = intermediate_size
lowercase_ : Any = hidden_act
lowercase_ : Any = type_sequence_label_size
lowercase_ : Any = initializer_range
lowercase_ : List[Any] = out_features
lowercase_ : List[str] = num_labels
lowercase_ : Optional[int] = scope
lowercase_ : Optional[int] = num_stages
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
lowercase_ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase_ : List[Any] = None
if self.use_labels:
lowercase_ : List[str] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
lowercase_ : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
return ConvNextConfig(
num_channels=self.num_channels ,num_stages=self.num_stages ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,is_training=self.is_training ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,out_features=self.out_features ,)
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
return UperNetConfig(
backbone_config=self.get_backbone_config() ,hidden_size=512 ,pool_scales=[1, 2, 3, 6] ,use_auxiliary_head=__UpperCamelCase ,auxiliary_loss_weight=0.4 ,auxiliary_in_channels=40 ,auxiliary_channels=256 ,auxiliary_num_convs=1 ,auxiliary_concat_input=__UpperCamelCase ,loss_ignore_index=255 ,num_labels=self.num_labels ,)
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
lowercase_ : Tuple = UperNetForSemanticSegmentation(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
lowercase_ : List[Any] = model(__UpperCamelCase )
self.parent.assertEqual(
result.logits.shape ,(self.batch_size, self.num_labels, self.image_size, self.image_size) )
def _UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class UpperCamelCase ( lowercase_ , lowercase_ , unittest.TestCase ):
lowercase = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
lowercase = {'image-segmentation': UperNetForSemanticSegmentation} if is_torch_available() else {}
lowercase = False
lowercase = False
lowercase = False
lowercase = False
lowercase = False
lowercase = False
def _UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : Union[str, Any] = UperNetModelTester(self )
lowercase_ : Tuple = ConfigTester(self ,config_class=__UpperCamelCase ,has_text_modality=__UpperCamelCase ,hidden_size=37 )
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
return
def _UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
lowercase_ , lowercase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : Tuple = model_class(__UpperCamelCase )
lowercase_ : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ : Union[str, Any] = [*signature.parameters.keys()]
lowercase_ : List[str] = ['pixel_values']
self.assertListEqual(arg_names[:1] ,__UpperCamelCase )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__UpperCamelCase )
@unittest.skip(reason='UperNet does not use inputs_embeds' )
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
pass
@unittest.skip(reason='UperNet does not support input and output embeddings' )
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip(reason='UperNet does not have a base model' )
def _UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip(reason='UperNet does not have a base model' )
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason='UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
pass
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
def check_hidden_states_output(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ):
lowercase_ : List[Any] = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
with torch.no_grad():
lowercase_ : Tuple = model(**self._prepare_for_class(__UpperCamelCase ,__UpperCamelCase ) )
lowercase_ : Dict = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowercase_ : Optional[int] = self.model_tester.num_stages
self.assertEqual(len(__UpperCamelCase ) ,expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,)
lowercase_ , lowercase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : List[str] = True
check_hidden_states_output(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase_ : Union[str, Any] = True
check_hidden_states_output(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
lowercase_ , lowercase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ : Union[str, Any] = _config_zero_init(__UpperCamelCase )
lowercase_ : Union[str, Any] = _config_zero_init(configs_no_init.backbone_config )
for model_class in self.all_model_classes:
lowercase_ : Tuple = model_class(config=__UpperCamelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' ,)
@unittest.skip(reason='UperNet does not have tied weights' )
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
pass
@slow
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ : List[Any] = UperNetForSemanticSegmentation.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def prepare_img():
    filepath = hf_hub_download(
        repo_id='hf-internal-testing/fixtures_ade20k' , repo_type='dataset' , filename='ADE_val_00000001.jpg' )
    image = Image.open(filepath).convert('RGB' )
    return image
@require_torch
@require_vision
@slow
class UpperCamelCase ( unittest.TestCase ):
def _UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : Union[str, Any] = AutoImageProcessor.from_pretrained('openmmlab/upernet-swin-tiny' )
lowercase_ : Any = UperNetForSemanticSegmentation.from_pretrained('openmmlab/upernet-swin-tiny' ).to(__UpperCamelCase )
lowercase_ : Optional[Any] = prepare_img()
lowercase_ : Optional[Any] = processor(images=__UpperCamelCase ,return_tensors='pt' ).to(__UpperCamelCase )
with torch.no_grad():
lowercase_ : str = model(**__UpperCamelCase )
lowercase_ : int = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape ,__UpperCamelCase )
lowercase_ : str = torch.tensor(
[[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] ).to(__UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) )
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
lowercase_ : List[str] = AutoImageProcessor.from_pretrained('openmmlab/upernet-convnext-tiny' )
lowercase_ : List[str] = UperNetForSemanticSegmentation.from_pretrained('openmmlab/upernet-convnext-tiny' ).to(__UpperCamelCase )
lowercase_ : Optional[int] = prepare_img()
lowercase_ : Optional[Any] = processor(images=__UpperCamelCase ,return_tensors='pt' ).to(__UpperCamelCase )
with torch.no_grad():
lowercase_ : Optional[int] = model(**__UpperCamelCase )
lowercase_ : int = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape ,__UpperCamelCase )
lowercase_ : int = torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] ).to(__UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) )
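The slow tests above stop at raw logits; a sketch of turning them into a per-pixel class map with the same public API (checkpoint download required, reusing the prepare_img helper defined above):

import torch
from transformers import AutoImageProcessor, UperNetForSemanticSegmentation

processor = AutoImageProcessor.from_pretrained('openmmlab/upernet-convnext-tiny')
model = UperNetForSemanticSegmentation.from_pretrained('openmmlab/upernet-convnext-tiny')

inputs = processor(images=prepare_img(), return_tensors='pt')
with torch.no_grad():
    outputs = model(**inputs)

seg_map = outputs.logits.argmax(dim=1)[0]  # (512, 512) tensor of ADE20K class indices
print(seg_map.shape, seg_map.unique()[:5])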
| 425 | 0 |
'''simple docstring'''
def gnome_sort(lst):
    """simple docstring"""
    if len(lst) <= 1:
        return lst

    i = 1

    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1

    return lst


if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(gnome_sort(unsorted))
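Gnome sort runs in O(n^2) with a single index walking back over inversions; quick checks:

print(gnome_sort([34, 2, 10, -9]))   # [-9, 2, 10, 34]
print(gnome_sort([]))                # []
print(gnome_sort(['c', 'a', 'b']))   # ['a', 'b', 'c']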
| 47 |
'''simple docstring'''
import math

from numpy import inf
from scipy.integrate import quad


def gamma(num: float) -> float:
    """simple docstring"""
    if num <= 0:
        raise ValueError('''math domain error''')

    return quad(integrand, 0, inf, args=(num,))[0]


def integrand(x: float, z: float) -> float:
    """simple docstring"""
    return math.pow(x, z - 1) * math.exp(-x)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
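Since Gamma(n) = (n-1)! for positive integers and Gamma(1/2) = sqrt(pi), two quick checks of the quadrature:

print(gamma(5))    # ~24.0 (= 4!)
print(gamma(0.5))  # ~1.7724538509 (= sqrt(pi))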
| 47 | 1 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''adapter_layer''': '''encoder.layers.*.adapter_layer''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
'''pooling_layer.linear''': '''projector''',
'''pooling_layer.projection''': '''classifier''',
}
__snake_case = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
'''projector''',
'''classifier''',
]
def _A ( _lowercase ) -> Optional[Any]:
"""simple docstring"""
__UpperCamelCase = {}
with open(_lowercase , 'r' ) as file:
for line_number, line in enumerate(_lowercase ):
__UpperCamelCase = line.strip()
if line:
__UpperCamelCase = line.split()
__UpperCamelCase = line_number
__UpperCamelCase = words[0]
__UpperCamelCase = value
return result
def _A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> List[str]:
"""simple docstring"""
for attribute in key.split('.' ):
__UpperCamelCase = getattr(_lowercase , _lowercase )
__UpperCamelCase = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(_lowercase ):
__UpperCamelCase = PARAM_MAPPING[full_name.split('.' )[-1]]
__UpperCamelCase = 'param'
if weight_type is not None and weight_type != "param":
__UpperCamelCase = getattr(_lowercase , _lowercase ).shape
elif weight_type is not None and weight_type == "param":
__UpperCamelCase = hf_pointer
for attribute in hf_param_name.split('.' ):
__UpperCamelCase = getattr(_lowercase , _lowercase )
__UpperCamelCase = shape_pointer.shape
# let's reduce dimension
__UpperCamelCase = value[0]
else:
__UpperCamelCase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}''' )
if weight_type == "weight":
__UpperCamelCase = value
elif weight_type == "weight_g":
__UpperCamelCase = value
elif weight_type == "weight_v":
__UpperCamelCase = value
elif weight_type == "bias":
__UpperCamelCase = value
elif weight_type == "param":
for attribute in hf_param_name.split('.' ):
__UpperCamelCase = getattr(_lowercase , _lowercase )
__UpperCamelCase = value
else:
__UpperCamelCase = value
logger.info(f'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def _A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> List[Any]:
"""simple docstring"""
__UpperCamelCase = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(_lowercase ):
__UpperCamelCase = PARAM_MAPPING[full_name.split('.' )[-1]]
__UpperCamelCase = 'param'
if weight_type is not None and weight_type != "param":
__UpperCamelCase = '.'.join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
__UpperCamelCase = '.'.join([key, hf_param_name] )
else:
__UpperCamelCase = key
__UpperCamelCase = value if 'lm_head' in full_key else value[0]
__snake_case = {
'''W_a''': '''linear_1.weight''',
'''W_b''': '''linear_2.weight''',
'''b_a''': '''linear_1.bias''',
'''b_b''': '''linear_2.bias''',
'''ln_W''': '''norm.weight''',
'''ln_b''': '''norm.bias''',
}
def _A ( _lowercase , _lowercase , _lowercase=None , _lowercase=None ) -> List[Any]:
"""simple docstring"""
__UpperCamelCase = False
for key, mapped_key in MAPPING.items():
__UpperCamelCase = 'wav2vec2.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
__UpperCamelCase = True
if "*" in mapped_key:
__UpperCamelCase = name.split(_lowercase )[0].split('.' )[-2]
__UpperCamelCase = mapped_key.replace('*' , _lowercase )
if "weight_g" in name:
__UpperCamelCase = 'weight_g'
elif "weight_v" in name:
__UpperCamelCase = 'weight_v'
elif "bias" in name:
__UpperCamelCase = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__UpperCamelCase = 'weight'
else:
__UpperCamelCase = None
if hf_dict is not None:
rename_dict(_lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
else:
set_recursively(_lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
return is_used
return is_used
def _A ( _lowercase , _lowercase , _lowercase ) -> Dict:
"""simple docstring"""
__UpperCamelCase = []
__UpperCamelCase = fairseq_model.state_dict()
__UpperCamelCase = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
__UpperCamelCase = False
if "conv_layers" in name:
load_conv_layer(
_lowercase , _lowercase , _lowercase , _lowercase , hf_model.config.feat_extract_norm == 'group' , )
__UpperCamelCase = True
else:
__UpperCamelCase = load_wavaveca_layer(_lowercase , _lowercase , _lowercase )
if not is_used:
unused_weights.append(_lowercase )
logger.warning(f'''Unused weights: {unused_weights}''' )
def _A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> Any:
"""simple docstring"""
__UpperCamelCase = full_name.split('conv_layers.' )[-1]
__UpperCamelCase = name.split('.' )
__UpperCamelCase = int(items[0] )
__UpperCamelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
__UpperCamelCase = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
__UpperCamelCase = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
__UpperCamelCase = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
__UpperCamelCase = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(_lowercase )
@torch.no_grad()
def _A ( _lowercase , _lowercase , _lowercase=None , _lowercase=None , _lowercase=True , _lowercase=False ) -> Dict:
"""simple docstring"""
if config_path is not None:
__UpperCamelCase = WavaVecaConfig.from_pretrained(_lowercase )
else:
__UpperCamelCase = WavaVecaConfig()
if is_seq_class:
__UpperCamelCase = read_txt_into_dict(_lowercase )
__UpperCamelCase = idalabel
__UpperCamelCase = WavaVecaForSequenceClassification(_lowercase )
__UpperCamelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=_lowercase , return_attention_mask=_lowercase , )
feature_extractor.save_pretrained(_lowercase )
elif is_finetuned:
if dict_path:
__UpperCamelCase = Dictionary.load(_lowercase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
__UpperCamelCase = target_dict.pad_index
__UpperCamelCase = target_dict.bos_index
__UpperCamelCase = target_dict.eos_index
__UpperCamelCase = len(target_dict.symbols )
__UpperCamelCase = os.path.join(_lowercase , 'vocab.json' )
if not os.path.isdir(_lowercase ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(_lowercase ) )
return
os.makedirs(_lowercase , exist_ok=_lowercase )
__UpperCamelCase = target_dict.indices
# fairseq has the <pad> and <s> switched
__UpperCamelCase = 0
__UpperCamelCase = 1
with open(_lowercase , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(_lowercase , _lowercase )
__UpperCamelCase = WavaVecaCTCTokenizer(
_lowercase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=_lowercase , )
__UpperCamelCase = True if config.feat_extract_norm == 'layer' else False
__UpperCamelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=_lowercase , return_attention_mask=_lowercase , )
__UpperCamelCase = WavaVecaProcessor(feature_extractor=_lowercase , tokenizer=_lowercase )
processor.save_pretrained(_lowercase )
__UpperCamelCase = WavaVecaForCTC(_lowercase )
else:
__UpperCamelCase = WavaVecaForPreTraining(_lowercase )
if is_finetuned or is_seq_class:
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
__UpperCamelCase = argparse.Namespace(task='audio_pretraining' )
__UpperCamelCase = fairseq.tasks.setup_task(_lowercase )
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_lowercase )
__UpperCamelCase = model[0].eval()
recursively_load_weights(_lowercase , _lowercase , not is_finetuned )
hf_wavavec.save_pretrained(_lowercase )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
parser.add_argument(
'''--is_seq_class''',
action='''store_true''',
help='''Whether the model to convert is a fine-tuned sequence classification model or not''',
)
__snake_case = parser.parse_args()
__snake_case = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
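Invocation sketch for the converter, using only flags its argparse defines; the script filename and checkpoint path are placeholders:

import subprocess

subprocess.run([
    'python', 'convert_wav2vec2_checkpoint.py',   # placeholder filename
    '--checkpoint_path', '/path/to/wav2vec_small.pt',
    '--pytorch_dump_folder_path', './wav2vec2-base',
    '--not_finetuned',                            # pretrained-only checkpoint
], check=True)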
| 1 |
"""simple docstring"""
from typing import Any
def _snake_case ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ):
_validation(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , )
# Creates data structures and fill initial step
_lowerCamelCase : dict = {}
_lowerCamelCase : dict = {}
for state in states_space:
_lowerCamelCase : Optional[Any] = observations_space[0]
_lowerCamelCase : int = (
initial_probabilities[state] * emission_probabilities[state][observation]
)
_lowerCamelCase : List[Any] = None
# Fills the data structure with the probabilities of
# different transitions and pointers to previous states
for o in range(1 , len(lowercase__ ) ):
_lowerCamelCase : List[str] = observations_space[o]
_lowerCamelCase : str = observations_space[o - 1]
for state in states_space:
# Calculates the argmax for probability function
_lowerCamelCase : Dict = ''
_lowerCamelCase : int = -1
for k_state in states_space:
_lowerCamelCase : Any = (
probabilities[(k_state, prior_observation)]
* transition_probabilities[k_state][state]
* emission_probabilities[state][observation]
)
if probability > max_probability:
_lowerCamelCase : Dict = probability
_lowerCamelCase : int = k_state
# Update probabilities and pointers dicts
_lowerCamelCase : List[str] = (
probabilities[(arg_max, prior_observation)]
* transition_probabilities[arg_max][state]
* emission_probabilities[state][observation]
)
_lowerCamelCase : Union[str, Any] = arg_max
# The final observation
_lowerCamelCase : Tuple = observations_space[len(lowercase__ ) - 1]
# argmax for given final observation
_lowerCamelCase : Optional[int] = ''
_lowerCamelCase : str = -1
for k_state in states_space:
_lowerCamelCase : Optional[int] = probabilities[(k_state, final_observation)]
if probability > max_probability:
_lowerCamelCase : int = probability
_lowerCamelCase : Optional[int] = k_state
_lowerCamelCase : Any = arg_max
# Process pointers backwards
_lowerCamelCase : Optional[Any] = last_state
_lowerCamelCase : Union[str, Any] = []
for o in range(len(lowercase__ ) - 1 , -1 , -1 ):
result.append(lowercase__ )
_lowerCamelCase : Dict = pointers[previous, observations_space[o]]
result.reverse()
return result
def _snake_case ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ):
_validate_not_empty(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , )
_validate_lists(lowercase__ , lowercase__ )
_validate_dicts(
lowercase__ , lowercase__ , lowercase__ )
def _snake_case ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ):
if not all(
[
observations_space,
states_space,
initial_probabilities,
transition_probabilities,
emission_probabilities,
] ):
raise ValueError('There\'s an empty parameter' )
def _snake_case ( lowercase__ , lowercase__ ):
_validate_list(lowercase__ , 'observations_space' )
_validate_list(lowercase__ , 'states_space' )
def _snake_case ( lowercase__ , lowercase__ ):
if not isinstance(_object , lowercase__ ):
_lowerCamelCase : Tuple = f'''{var_name} must be a list'''
raise ValueError(lowercase__ )
else:
for x in _object:
if not isinstance(lowercase__ , lowercase__ ):
_lowerCamelCase : int = f'''{var_name} must be a list of strings'''
raise ValueError(lowercase__ )
def _snake_case ( lowercase__ , lowercase__ , lowercase__ , ):
_validate_dict(lowercase__ , 'initial_probabilities' , lowercase__ )
_validate_nested_dict(lowercase__ , 'transition_probabilities' )
_validate_nested_dict(lowercase__ , 'emission_probabilities' )
def _snake_case ( lowercase__ , lowercase__ ):
_validate_dict(_object , lowercase__ , lowercase__ )
for x in _object.values():
_validate_dict(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
def _snake_case ( lowercase__ , lowercase__ , lowercase__ , lowercase__ = False ):
if not isinstance(_object , lowercase__ ):
_lowerCamelCase : Tuple = f'''{var_name} must be a dict'''
raise ValueError(lowercase__ )
if not all(isinstance(lowercase__ , lowercase__ ) for x in _object ):
_lowerCamelCase : Optional[int] = f'''{var_name} all keys must be strings'''
raise ValueError(lowercase__ )
if not all(isinstance(lowercase__ , lowercase__ ) for x in _object.values() ):
_lowerCamelCase : Optional[int] = 'nested dictionary ' if nested else ''
_lowerCamelCase : Optional[Any] = f'''{var_name} {nested_text}all values must be {value_type.__name__}'''
raise ValueError(lowercase__ )
if __name__ == "__main__":
from doctest import testmod
    testmod()
| 630 | 0 |
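The classic healthy/fever HMM makes the expected argument shapes concrete; the top-level function's identifier is obfuscated above, so its call is shown commented out:

observations = ['normal', 'cold', 'dizzy']
states = ['Healthy', 'Fever']
start_p = {'Healthy': 0.6, 'Fever': 0.4}
trans_p = {
    'Healthy': {'Healthy': 0.7, 'Fever': 0.3},
    'Fever': {'Healthy': 0.4, 'Fever': 0.6},
}
emit_p = {
    'Healthy': {'normal': 0.5, 'cold': 0.4, 'dizzy': 0.1},
    'Fever': {'normal': 0.1, 'cold': 0.3, 'dizzy': 0.6},
}
# viterbi(observations, states, start_p, trans_p, emit_p)
# -> ['Healthy', 'Healthy', 'Fever']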
from __future__ import annotations
from typing import Any
class _UpperCamelCase:
def __init__( self : Tuple , _lowerCamelCase : int ):
_UpperCAmelCase : Optional[int] = num_of_nodes
_UpperCAmelCase : list[list[int]] = []
_UpperCAmelCase : dict[int, int] = {}
def a__ ( self : str , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : int ):
self.m_edges.append([u_node, v_node, weight] )
def a__ ( self : Union[str, Any] , _lowerCamelCase : int ):
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def a__ ( self : Union[str, Any] , _lowerCamelCase : int ):
if self.m_component[u_node] != u_node:
for k in self.m_component:
_UpperCAmelCase : int = self.find_component(_lowerCamelCase )
def a__ ( self : List[str] , _lowerCamelCase : list[int] , _lowerCamelCase : int , _lowerCamelCase : int ):
if component_size[u_node] <= component_size[v_node]:
_UpperCAmelCase : Optional[int] = v_node
component_size[v_node] += component_size[u_node]
self.set_component(_lowerCamelCase )
elif component_size[u_node] >= component_size[v_node]:
_UpperCAmelCase : int = self.find_component(_lowerCamelCase )
component_size[u_node] += component_size[v_node]
self.set_component(_lowerCamelCase )
def a__ ( self : Any ):
_UpperCAmelCase : Optional[Any] = []
_UpperCAmelCase : int = 0
_UpperCAmelCase : list[Any] = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
_UpperCAmelCase : Optional[Any] = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : Tuple = edge
_UpperCAmelCase : str = self.m_component[u]
_UpperCAmelCase : Any = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
_UpperCAmelCase : Optional[int] = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(_lowerCamelCase , _lowerCamelCase ):
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : Tuple = edge
_UpperCAmelCase : Union[str, Any] = self.m_component[u]
_UpperCAmelCase : Union[str, Any] = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
print(f"""Added edge [{u} - {v}]\nAdded weight: {w}\n""" )
num_of_components -= 1
_UpperCAmelCase : List[Any] = [-1] * self.m_num_of_nodes
print(f"""The total weight of the minimal spanning tree is: {mst_weight}""" )
def lowerCAmelCase_ ( ) -> None:
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
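Driver sketch for Boruvka's algorithm above; the class and method identifiers are obfuscated, so the assumed names (Graph, add_edge, boruvka) are shown commented out:

# g = Graph(4)                      # hypothetical: constructor takes the node count
# for u, v, w in [(0, 1, 10), (0, 2, 6), (0, 3, 5), (1, 3, 15), (2, 3, 4)]:
#     g.add_edge(u, v, w)           # hypothetical: appends to m_edges as above
# g.boruvka()                       # prints each added edge; total MST weight = 19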
| 328 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = True, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        # Resize so that the shortest edge of the image matches size["shortest_edge"].
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: int = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
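# Usage sketch for the processor above (the class name follows the
# reconstruction here; shapes assume the default 224x224 crop):
#
#   from PIL import Image
#   processor = CLIPImageProcessor()
#   batch = processor(images=Image.new("RGB", (640, 480)), return_tensors="np")
#   print(batch["pixel_values"].shape)  # (1, 3, 224, 224)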
| 328 | 1 |
'''simple docstring'''
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError('''To use the rich extension, install rich with `pip install rich`''') | 8 |
import argparse
from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path) -> None:
    """Convert a TensorFlow T5 checkpoint into a PyTorch checkpoint."""
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = T5ForConditionalGeneration(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_t5(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
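# Example command line for this script (the script name and paths are
# placeholders, not from the original file):
#   python convert_t5_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/t5/model.ckpt \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/pytorch_model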
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 252 | 0 |
'''simple docstring'''
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
mname = "facebook/wmt19-en-de"
tokenizer = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
config = FSMTConfig.from_pretrained(mname)
config.update(
    dict(
        d_model=4,
        encoder_layers=1,
        decoder_layers=1,
        encoder_ffn_dim=4,
        decoder_ffn_dim=4,
        encoder_attention_heads=1,
        decoder_attention_heads=1,
    )
)
tiny_model = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")
# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)
print("test output:", len(outputs.logits[0]))
# Save
mname_tiny = "tiny-wmt19-en-de"
tiny_model.half()  # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f"Generated {mname_tiny}")
# Upload
# transformers-cli upload tiny-wmt19-en-de
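# Reloading the generated tiny checkpoint afterwards (sketch):
#   model = FSMTForConditionalGeneration.from_pretrained("tiny-wmt19-en-de")
#   tokenizer = FSMTTokenizer.from_pretrained("tiny-wmt19-en-de")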
| 701 |
'''simple docstring'''
from typing import Any
def mode(input_list: list) -> list[Any]:
    """Return every most-frequent value in `input_list`, sorted."""
    if not input_list:
        return []
    counts = [input_list.count(value) for value in input_list]
    max_count = max(counts)  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(counts) if value == max_count})
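# Doctest-style examples for the fixed function above:
#   >>> mode([2, 2, 3, 4])
#   [2]
#   >>> mode([1, 1, 2, 2])  # a tie returns every mode, sorted
#   [1, 2]
#   >>> mode([])
#   []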
if __name__ == "__main__":
import doctest
doctest.testmod()
| 238 | 0 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification
    inputs = ["text", ["text"]]
    outputs = ["text"]
    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")
    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels), [f"This example is {label}" for label in labels], return_tensors="pt", padding="max_length",
        )
    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
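# Sketch of the encode/forward/decode flow of the tool above (the input text
# and labels are illustrative):
#
#   tool = TextClassificationTool()
#   tool.setup()
#   encoded = tool.encode("The movie was great", ["positive", "negative"])
#   decoded = tool.decode(tool.model(**encoded))
#   print(decoded)  # most likely label, e.g. "positive"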
| 103 |
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class lowerCamelCase__ ( UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
_UpperCamelCase : List[str] = MvpTokenizer
_UpperCamelCase : Any = MvpTokenizerFast
_UpperCamelCase : List[str] = True
_UpperCamelCase : Dict = filter_roberta_detectors
def snake_case__ ( self ):
'''simple docstring'''
super().setUp()
UpperCamelCase__ = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
UpperCamelCase__ = dict(zip(snake_case , range(len(snake_case ) ) ) )
UpperCamelCase__ = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
UpperCamelCase__ = {"unk_token": "<unk>"}
UpperCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
UpperCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(snake_case ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(snake_case ) )
def snake_case__ ( self , **snake_case ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **snake_case )
def snake_case__ ( self , **snake_case ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **snake_case )
def snake_case__ ( self , snake_case ):
'''simple docstring'''
return "lower newer", "lower newer"
@cached_property
def snake_case__ ( self ):
'''simple docstring'''
return MvpTokenizer.from_pretrained("RUCAIBox/mvp" )
@cached_property
def snake_case__ ( self ):
'''simple docstring'''
return MvpTokenizerFast.from_pretrained("RUCAIBox/mvp" )
@require_torch
def snake_case__ ( self ):
'''simple docstring'''
UpperCamelCase__ = ["A long paragraph for summarization.", "Another paragraph for summarization."]
UpperCamelCase__ = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCamelCase__ = tokenizer(snake_case , max_length=len(snake_case ) , padding=snake_case , return_tensors="pt" )
self.assertIsInstance(snake_case , snake_case )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
UpperCamelCase__ = batch.input_ids.tolist()[0]
self.assertListEqual(snake_case , snake_case )
# Test that special tokens are reset
@require_torch
def snake_case__ ( self ):
'''simple docstring'''
UpperCamelCase__ = ["A long paragraph for summarization.", "Another paragraph for summarization."]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCamelCase__ = tokenizer(snake_case , padding=snake_case , return_tensors="pt" )
# check if input_ids are returned and no labels
self.assertIn("input_ids" , snake_case )
self.assertIn("attention_mask" , snake_case )
self.assertNotIn("labels" , snake_case )
self.assertNotIn("decoder_attention_mask" , snake_case )
@require_torch
def snake_case__ ( self ):
'''simple docstring'''
UpperCamelCase__ = [
"Summary of the text.",
"Another summary.",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCamelCase__ = tokenizer(text_target=snake_case , max_length=32 , padding="max_length" , return_tensors="pt" )
self.assertEqual(32 , targets["input_ids"].shape[1] )
@require_torch
def snake_case__ ( self ):
'''simple docstring'''
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCamelCase__ = tokenizer(
["I am a small frog" * 1024, "I am a small frog"] , padding=snake_case , truncation=snake_case , return_tensors="pt" )
self.assertIsInstance(snake_case , snake_case )
self.assertEqual(batch.input_ids.shape , (2, 1024) )
@require_torch
def snake_case__ ( self ):
'''simple docstring'''
UpperCamelCase__ = ["A long paragraph for summarization."]
UpperCamelCase__ = [
"Summary of the text.",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCamelCase__ = tokenizer(snake_case , text_target=snake_case , return_tensors="pt" )
UpperCamelCase__ = inputs["input_ids"]
UpperCamelCase__ = inputs["labels"]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(snake_case , **snake_case )
UpperCamelCase__ = self.tokenizer_class.from_pretrained(snake_case , **snake_case )
UpperCamelCase__ = "A, <mask> AllenNLP sentence."
UpperCamelCase__ = tokenizer_r.encode_plus(snake_case , add_special_tokens=snake_case , return_token_type_ids=snake_case )
UpperCamelCase__ = tokenizer_p.encode_plus(snake_case , add_special_tokens=snake_case , return_token_type_ids=snake_case )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
UpperCamelCase__ = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
UpperCamelCase__ = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p["input_ids"] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
snake_case , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
self.assertSequenceEqual(
snake_case , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
| 551 | 0 |
'''simple docstring'''
import argparse
import collections
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import T5Config, T5EncoderModel, T5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Return the k/o/q/v kernels of (self-)attention; does not transpose."""
    k = params[f"{prefix}/layers_{i}/{layer_name}/key/kernel"]
    o = params[f"{prefix}/layers_{i}/{layer_name}/out/kernel"]
    q = params[f"{prefix}/layers_{i}/{layer_name}/query/kernel"]
    v = params[f"{prefix}/layers_{i}/{layer_name}/value/kernel"]
    return k, o, q, v
def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Return the MLP kernels of a layer; does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/layers_{i}/mlp/wi_0/kernel"]
        wi_1 = params[f"{prefix}/layers_{i}/mlp/wi_1/kernel"]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/layers_{i}/mlp/wi/kernel"]
    wo = params[f"{prefix}/layers_{i}/mlp/wo/kernel"]
    return wi, wo
def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Return the layer norm scale of a layer."""
    return params[f"{prefix}/layers_{i}/{layer_name}/scale"]
def convert_t5x_to_pytorch(variables, *, num_layers, is_encoder_only):
    """Convert the flattened T5X parameters into PyTorch naming."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}
    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/layers_0/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)
    new = collections.OrderedDict()
    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]
    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T
        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T
    new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
        "encoder/relpos_bias/rel_embedding"
    ].T
    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]
    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T
            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T
            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T
        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
            "decoder/relpos_bias/rel_embedding"
        ].T
        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T
    return new
def make_state_dict(converted_params, is_encoder_only):
    """Prepare a state dict for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])
    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]
    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]
    return state_dict
def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only):
    """Load the converted T5X parameters into the PyTorch model."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only)
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_t5x_checkpoint_to_pytorch(t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only=False):
    """Load config and model, convert the T5X checkpoint, and save a PyTorch checkpoint."""
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = T5EncoderModel(config)
    else:
        model = T5ForConditionalGeneration(config)
    # Load weights from the T5X checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
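# Example command line for the converter (the script name and paths are
# placeholders):
#   python convert_t5x_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/output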
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
)
args = parser.parse_args()
convert_t5x_checkpoint_to_pytorch(
    args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
) | 714 |
'''simple docstring'''
class CircularQueue:
    """Circular FIFO queue with a fixed capacity, backed by a list."""
    def __init__(self, n: int):
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0
    def __len__(self) -> int:
        return self.size
    def is_empty(self) -> bool:
        return self.size == 0
    def first(self):
        """Return the front element, or False if the queue is empty."""
        return False if self.is_empty() else self.array[self.front]
    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self
    def dequeue(self):
        if self.size == 0:
            raise Exception("UNDERFLOW")
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
return temp | 280 | 0 |
'''simple docstring'''
def least_divisible_repunit(divisor: int) -> int:
    """Return the length of the shortest repunit divisible by `divisor` (Project Euler 129's A(n))."""
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index
def solution(limit: int = 100_0000) -> int:
    """Return the smallest n for which A(n) first exceeds `limit`."""
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor
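# Sanity checks that follow from the definition of A(n) in Project Euler 129
# (R(6) = 111111 = 3 * 7 * 11 * 13 * 37, so A(7) == 6; R(5) = 11111 = 41 * 271):
#   >>> least_divisible_repunit(7)
#   6
#   >>> least_divisible_repunit(41)
#   5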
if __name__ == "__main__":
print(f"""{solution() = }""")
| 3 |
def reverse_long_words(sentence: str) -> str:
    """Reverse every word longer than four characters."""
    return " ".join(
        word[::-1] if len(word) > 4 else word for word in sentence.split()
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('''Hey wollef sroirraw'''))
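# Expected behavior: words longer than four characters are reversed in place,
# so the demo above prints "Hey fellow warriors".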
| 154 | 0 |
'''simple docstring'''
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
logger = logging.get_logger(__name__)
def normalize_box(box, width, height):
    # LayoutLM-style models expect boxes on a 0-1000 normalized grid.
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]
def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str] = None):
    """Apply Tesseract OCR on a document image and return recognized words plus normalized bounding boxes."""
    tesseract_config = tesseract_config if tesseract_config is not None else ""
    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]
    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]
    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)
    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))
    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"
    return words, normalized_boxes
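# Worked example for normalize_box (a 200x100 image; the values follow from
# the 0-1000 normalization above):
#   >>> normalize_box([20, 30, 100, 50], 200, 100)
#   [100, 300, 500, 500]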
class LayoutLMv2ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, apply_ocr: bool = True, ocr_lang: Optional[str] = None, tesseract_config: Optional[str] = "", **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, apply_ocr: bool = None, ocr_lang: Optional[str] = None, tesseract_config: Optional[str] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        # flip color channels from RGB to BGR (as Detectron2 requires this)
        images = [flip_channel_order(image) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
| 323 |
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class _snake_case (__SCREAMING_SNAKE_CASE):
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Tuple = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_snake_case ,"width_multiplier" ) )
class _snake_case :
def __init__( self ,_snake_case ,_snake_case=13 ,_snake_case=64 ,_snake_case=2 ,_snake_case=3 ,_snake_case="swish" ,_snake_case=3 ,_snake_case=32 ,_snake_case=0.1 ,_snake_case=0.02 ,_snake_case=True ,_snake_case=True ,_snake_case=10 ,_snake_case=None ,_snake_case=0.25 ,_snake_case=0.0 ,_snake_case=0.0 ,):
UpperCAmelCase_ : List[str] = parent
UpperCAmelCase_ : str = batch_size
UpperCAmelCase_ : str = image_size
UpperCAmelCase_ : Tuple = patch_size
UpperCAmelCase_ : List[str] = num_channels
UpperCAmelCase_ : int = make_divisible(5_12 * width_multiplier ,divisor=8 )
UpperCAmelCase_ : Optional[int] = hidden_act
UpperCAmelCase_ : str = conv_kernel_size
UpperCAmelCase_ : Optional[int] = output_stride
UpperCAmelCase_ : str = classifier_dropout_prob
UpperCAmelCase_ : Any = use_labels
UpperCAmelCase_ : List[Any] = is_training
UpperCAmelCase_ : List[str] = num_labels
UpperCAmelCase_ : str = initializer_range
UpperCAmelCase_ : List[str] = scope
UpperCAmelCase_ : Optional[int] = width_multiplier
UpperCAmelCase_ : List[Any] = ffn_dropout
UpperCAmelCase_ : Tuple = attn_dropout
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : str = None
if self.use_labels:
UpperCAmelCase_ : Any = ids_tensor([self.batch_size] ,self.num_labels )
UpperCAmelCase_ : str = ids_tensor([self.batch_size, self.image_size, self.image_size] ,self.num_labels )
UpperCAmelCase_ : Tuple = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCamelCase__ ( self ):
return MobileViTVaConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_act=self.hidden_act ,conv_kernel_size=self.conv_kernel_size ,output_stride=self.output_stride ,classifier_dropout_prob=self.classifier_dropout_prob ,initializer_range=self.initializer_range ,width_multiplier=self.width_multiplier ,ffn_dropout=self.ffn_dropout_prob ,attn_dropout=self.attn_dropout_prob ,)
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ,_snake_case ):
UpperCAmelCase_ : Any = MobileViTVaModel(config=_snake_case )
model.to(_snake_case )
model.eval()
UpperCAmelCase_ : Optional[Any] = model(_snake_case )
self.parent.assertEqual(
result.last_hidden_state.shape ,(
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) ,)
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ,_snake_case ):
UpperCAmelCase_ : Tuple = self.num_labels
UpperCAmelCase_ : Union[str, Any] = MobileViTVaForImageClassification(_snake_case )
model.to(_snake_case )
model.eval()
UpperCAmelCase_ : Optional[Any] = model(_snake_case ,labels=_snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ,_snake_case ):
UpperCAmelCase_ : int = self.num_labels
UpperCAmelCase_ : Dict = MobileViTVaForSemanticSegmentation(_snake_case )
model.to(_snake_case )
model.eval()
UpperCAmelCase_ : Any = model(_snake_case )
self.parent.assertEqual(
result.logits.shape ,(
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) ,)
UpperCAmelCase_ : Tuple = model(_snake_case ,labels=_snake_case )
self.parent.assertEqual(
result.logits.shape ,(
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) ,)
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Tuple = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = config_and_inputs
UpperCAmelCase_ : Tuple = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _snake_case (__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase):
__A : Any =(
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
__A : List[str] =(
{
"feature-extraction": MobileViTVaModel,
"image-classification": MobileViTVaForImageClassification,
"image-segmentation": MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__A : Dict =False
__A : Optional[int] =False
__A : int =False
__A : Optional[int] =False
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Union[str, Any] = MobileViTVaModelTester(self )
UpperCAmelCase_ : Optional[Any] = MobileViTVaConfigTester(self ,config_class=_snake_case ,has_text_modality=_snake_case )
def UpperCamelCase__ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="MobileViTV2 does not use inputs_embeds" )
def UpperCamelCase__ ( self ):
pass
@unittest.skip(reason="MobileViTV2 does not support input and output embeddings" )
def UpperCamelCase__ ( self ):
pass
@unittest.skip(reason="MobileViTV2 does not output attentions" )
def UpperCamelCase__ ( self ):
pass
@require_torch_multi_gpu
@unittest.skip(reason="Got `CUDA error: misaligned address` for tests after this one being run." )
def UpperCamelCase__ ( self ):
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def UpperCamelCase__ ( self ):
pass
def UpperCamelCase__ ( self ):
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : List[str] = model_class(_snake_case )
UpperCAmelCase_ : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ : Dict = [*signature.parameters.keys()]
UpperCAmelCase_ : Optional[int] = ["pixel_values"]
self.assertListEqual(arg_names[:1] ,_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def UpperCamelCase__ ( self ):
def check_hidden_states_output(_snake_case ,_snake_case ,_snake_case ):
UpperCAmelCase_ : List[Any] = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
UpperCAmelCase_ : List[str] = model(**self._prepare_for_class(_snake_case ,_snake_case ) )
UpperCAmelCase_ : Optional[Any] = outputs.hidden_states
UpperCAmelCase_ : Dict = 5
self.assertEqual(len(_snake_case ) ,_snake_case )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
UpperCAmelCase_ : str = 2
for i in range(len(_snake_case ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) ,[self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] ,)
divisor *= 2
self.assertEqual(self.model_tester.output_stride ,divisor // 2 )
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : Any = True
check_hidden_states_output(_snake_case ,_snake_case ,_snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ : List[Any] = True
check_hidden_states_output(_snake_case ,_snake_case ,_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_snake_case )
@slow
def UpperCamelCase__ ( self ):
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : List[Any] = MobileViTVaModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def a__ ( ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _snake_case (unittest.TestCase):
@cached_property
def UpperCamelCase__ ( self ):
return (
MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256" )
if is_vision_available()
else None
)
@slow
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[str] = MobileViTVaForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256" ).to(
_snake_case )
UpperCAmelCase_ : Optional[Any] = self.default_image_processor
UpperCAmelCase_ : List[str] = prepare_img()
UpperCAmelCase_ : Tuple = image_processor(images=_snake_case ,return_tensors="pt" ).to(_snake_case )
# forward pass
with torch.no_grad():
UpperCAmelCase_ : Union[str, Any] = model(**_snake_case )
# verify the logits
UpperCAmelCase_ : List[Any] = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape ,_snake_case )
UpperCAmelCase_ : Any = torch.tensor([-1.63_36E00, -7.32_04E-02, -5.18_83E-01] ).to(_snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,_snake_case ,atol=1E-4 ) )
@slow
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Dict = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
UpperCAmelCase_ : Union[str, Any] = model.to(_snake_case )
UpperCAmelCase_ : int = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
UpperCAmelCase_ : Any = prepare_img()
UpperCAmelCase_ : Dict = image_processor(images=_snake_case ,return_tensors="pt" ).to(_snake_case )
# forward pass
with torch.no_grad():
UpperCAmelCase_ : List[Any] = model(**_snake_case )
UpperCAmelCase_ : Optional[Any] = outputs.logits
# verify the logits
UpperCAmelCase_ : Optional[Any] = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape ,_snake_case )
UpperCAmelCase_ : Optional[int] = torch.tensor(
[
[[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
[[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
[[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
] ,device=_snake_case ,)
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] ,_snake_case ,atol=1E-4 ) )
@slow
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : int = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
UpperCAmelCase_ : Any = model.to(_snake_case )
UpperCAmelCase_ : Dict = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
UpperCAmelCase_ : str = prepare_img()
UpperCAmelCase_ : int = image_processor(images=_snake_case ,return_tensors="pt" ).to(_snake_case )
# forward pass
with torch.no_grad():
UpperCAmelCase_ : Optional[int] = model(**_snake_case )
UpperCAmelCase_ : Union[str, Any] = outputs.logits.detach().cpu()
UpperCAmelCase_ : Tuple = image_processor.post_process_semantic_segmentation(outputs=_snake_case ,target_sizes=[(50, 60)] )
UpperCAmelCase_ : Optional[int] = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape ,_snake_case )
UpperCAmelCase_ : Any = image_processor.post_process_semantic_segmentation(outputs=_snake_case )
UpperCAmelCase_ : int = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape ,_snake_case )
| 323 | 1 |
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self , lowerCamelCase__ , lowerCamelCase__=99 , lowerCamelCase__=13 , lowerCamelCase__=16 , lowerCamelCase__=7 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=False , lowerCamelCase__=True , lowerCamelCase__=2 , lowerCamelCase__=32 , lowerCamelCase__=4 , lowerCamelCase__=4 , lowerCamelCase__=30 , lowerCamelCase__=0 , lowerCamelCase__=1 , lowerCamelCase__=2 , lowerCamelCase__=None , ) -> str:
'''simple docstring'''
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = decoder_seq_length
# For common tests
__lowerCamelCase = self.decoder_seq_length
__lowerCamelCase = is_training
__lowerCamelCase = use_attention_mask
__lowerCamelCase = use_labels
__lowerCamelCase = vocab_size
__lowerCamelCase = d_model
__lowerCamelCase = d_model
__lowerCamelCase = decoder_layers
__lowerCamelCase = decoder_layers
__lowerCamelCase = decoder_ffn_dim
__lowerCamelCase = decoder_attention_heads
__lowerCamelCase = decoder_attention_heads
__lowerCamelCase = eos_token_id
__lowerCamelCase = bos_token_id
__lowerCamelCase = pad_token_id
__lowerCamelCase = decoder_start_token_id
__lowerCamelCase = use_cache
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = None
__lowerCamelCase = decoder_seq_length
__lowerCamelCase = 2
__lowerCamelCase = 1
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
__lowerCamelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
__lowerCamelCase = None
if self.use_attention_mask:
__lowerCamelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
__lowerCamelCase = None
if self.use_labels:
__lowerCamelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
__lowerCamelCase = TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ) -> str:
'''simple docstring'''
__lowerCamelCase = True
__lowerCamelCase = TrOCRDecoder(config=lowerCamelCase__ ).to(lowerCamelCase__ ).eval()
__lowerCamelCase = input_ids[:2]
input_ids[input_ids == 0] += 1
# first forward pass
__lowerCamelCase = model(lowerCamelCase__ , use_cache=lowerCamelCase__ )
__lowerCamelCase = model(lowerCamelCase__ )
__lowerCamelCase = model(lowerCamelCase__ , use_cache=lowerCamelCase__ )
self.parent.assertTrue(len(lowerCamelCase__ ) == len(lowerCamelCase__ ) )
self.parent.assertTrue(len(lowerCamelCase__ ) == len(lowerCamelCase__ ) + 1 )
__lowerCamelCase = outputs['past_key_values']
# create hypothetical next token and extent to next_input_ids
__lowerCamelCase = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
# append to next input_ids and
__lowerCamelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
__lowerCamelCase = model(lowerCamelCase__ )['last_hidden_state']
__lowerCamelCase = model(lowerCamelCase__ , past_key_values=lowerCamelCase__ )['last_hidden_state']
# select random slice
__lowerCamelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__lowerCamelCase = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
__lowerCamelCase = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 )
def lowercase_ ( self ) -> int:
'''simple docstring'''
__lowerCamelCase = self.prepare_config_and_inputs()
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = config_and_inputs
__lowerCamelCase = {'input_ids': input_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
snake_case_ = (TrOCRForCausalLM,) if is_torch_available() else ()
snake_case_ = {'''text-generation''': TrOCRForCausalLM} if is_torch_available() else {}
snake_case_ = True
snake_case_ = False
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
__lowerCamelCase = TrOCRStandaloneDecoderModelTester(self , is_training=lowerCamelCase__ )
__lowerCamelCase = ConfigTester(self , config_class=lowerCamelCase__ )
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
pass
def lowercase_ ( self ) -> int:
'''simple docstring'''
pass
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
pass
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*lowerCamelCase__ )
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
return
@unittest.skip('The model doesn\'t support left padding' ) # and it's not used enough to be worth fixing :)
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
pass
| 469 |
from string import ascii_uppercase
ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}
def decimal_to_any(num: int, base: int) -> str:
    """Convert a positive decimal integer to its representation in `base` (2-36)."""
    if isinstance(num, float):
        raise TypeError("int() can't convert non-string with explicit base")
    if num < 0:
        raise ValueError("parameter must be positive int")
    if isinstance(base, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if isinstance(base, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if base in (0, 1):
        raise ValueError("base must be >= 2")
    if base > 36:
        raise ValueError("base must be <= 36")
    new_value = ""
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num, base)
        if base >= 11 and 9 < mod < 36:
            actual_value = ALPHABET_VALUES[str(mod)]
        else:
            actual_value = str(mod)
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1])
        elif div == 1:
            new_value += str(num)
            return str(new_value[::-1])
    return new_value[::-1]
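# Doctest-style examples for decimal_to_any:
#   >>> decimal_to_any(0, 2)
#   '0'
#   >>> decimal_to_any(255, 16)
#   'FF'
#   >>> decimal_to_any(44, 5)
#   '134'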
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(10_00):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
| 469 | 1 |
'''simple docstring'''
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    """Return the prime factorization of `n` as a sorted list."""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
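# Examples (consistent with the function above):
#   >>> prime_factors(100)
#   [2, 2, 5, 5]
#   >>> prime_factors(97)
#   [97]
#   >>> prime_factors(2560)
#   [2, 2, 2, 2, 2, 2, 2, 2, 2, 5]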
if __name__ == "__main__":
import doctest
doctest.testmod()
| 50 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DPR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"""
),
}
class DPRConfig(PretrainedConfig):
    model_type = "dpr"
    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        projection_dim: int = 0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
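# Minimal usage sketch for the config reconstructed above (the `_demo_config`
# name is illustrative): defaults mirror BERT-base, and projection_dim sizes the
# optional projection head (0 means the encoder hidden size is used as-is).
_demo_config = DPRConfig(projection_dim=128)
assert _demo_config.projection_dim == 128 and _demo_config.model_type == "dpr"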
| 50 | 1 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class a_ :
def __init__( self , UpperCAmelCase , UpperCAmelCase=13 , UpperCAmelCase=7 , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=99 , UpperCAmelCase=32 , UpperCAmelCase=5 , UpperCAmelCase=4 , UpperCAmelCase=37 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=5_12 , UpperCAmelCase=16 , UpperCAmelCase=2 , UpperCAmelCase=0.02 , UpperCAmelCase=3 , UpperCAmelCase=4 , UpperCAmelCase=None , ):
a_ = parent
a_ = batch_size
a_ = seq_length
a_ = is_training
a_ = use_token_type_ids
a_ = use_labels
a_ = vocab_size
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_act
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = max_position_embeddings
a_ = type_vocab_size
a_ = type_sequence_label_size
a_ = initializer_range
a_ = num_labels
a_ = num_choices
a_ = scope
a_ = self.vocab_size - 1
def lowerCAmelCase__ ( self ):
a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a_ = None
if self.use_token_type_ids:
a_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
a_ = None
a_ = None
a_ = None
if self.use_labels:
a_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
a_ = ids_tensor([self.batch_size] , self.num_choices )
a_ = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
a_ = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def lowerCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , *UpperCAmelCase ):
a_ = OpenAIGPTModel(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
a_ = model(UpperCAmelCase , token_type_ids=UpperCAmelCase , head_mask=UpperCAmelCase )
a_ = model(UpperCAmelCase , token_type_ids=UpperCAmelCase )
a_ = model(UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , *UpperCAmelCase ):
a_ = OpenAIGPTLMHeadModel(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
a_ = model(UpperCAmelCase , token_type_ids=UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , *UpperCAmelCase ):
a_ = OpenAIGPTDoubleHeadsModel(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
a_ = model(UpperCAmelCase , token_type_ids=UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , *UpperCAmelCase ):
a_ = self.num_labels
a_ = OpenAIGPTForSequenceClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
a_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a_ = model(UpperCAmelCase , token_type_ids=UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase__ ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict
@require_torch
class a_ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
lowerCamelCase__ : str = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
lowerCamelCase__ : Union[str, Any] = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
lowerCamelCase__ : str = (
{
'feature-extraction': OpenAIGPTModel,
'text-classification': OpenAIGPTForSequenceClassification,
'text-generation': OpenAIGPTLMHeadModel,
'zero-shot': OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def lowerCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def lowerCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=False ):
a_ = super()._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
a_ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=UpperCAmelCase , )
a_ = inputs_dict["""labels"""]
a_ = inputs_dict["""labels"""]
a_ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=UpperCAmelCase , )
a_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase )
return inputs_dict
def lowerCAmelCase__ ( self ):
a_ = OpenAIGPTModelTester(self )
a_ = ConfigTester(self , config_class=UpperCAmelCase , n_embd=37 )
def lowerCAmelCase__ ( self ):
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self ):
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*UpperCAmelCase )
def lowerCAmelCase__ ( self ):
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*UpperCAmelCase )
def lowerCAmelCase__ ( self ):
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*UpperCAmelCase )
def lowerCAmelCase__ ( self ):
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*UpperCAmelCase )
@slow
def lowerCAmelCase__ ( self ):
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ = OpenAIGPTModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
@require_torch
class a_ ( unittest.TestCase ):
@slow
def lowerCAmelCase__ ( self ):
a_ = OpenAIGPTLMHeadModel.from_pretrained("""openai-gpt""" )
model.to(UpperCAmelCase )
a_ = torch.tensor([[4_81, 47_35, 5_44]] , dtype=torch.long , device=UpperCAmelCase ) # the president is
a_ = [
4_81,
47_35,
5_44,
2_46,
9_63,
8_70,
7_62,
2_39,
2_44,
4_04_77,
2_44,
2_49,
7_19,
8_81,
4_87,
5_44,
2_40,
2_44,
6_03,
4_81,
] # the president is a very good man. " \n " i\'m sure he is, " said the
a_ = model.generate(UpperCAmelCase , do_sample=UpperCAmelCase )
self.assertListEqual(output_ids[0].tolist() , UpperCAmelCase )
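# Note on the integration check above: the continuation is compared token-by-token
# against hard-coded ids, which only works because generate() runs greedy decoding
# (do_sample=False in the original test), making the output deterministic.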
| 263 |
'''simple docstring'''
import comet # From: unbabel-comet
import torch
import datasets
lowercase__ =datasets.logging.get_logger(__name__)
lowercase__ ='\\n@inproceedings{rei-EtAl:2020:WMT,\n author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},\n title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task},\n booktitle = {Proceedings of the Fifth Conference on Machine Translation},\n month = {November},\n year = {2020},\n address = {Online},\n publisher = {Association for Computational Linguistics},\n pages = {909--918},\n}\n@inproceedings{rei-etal-2020-comet,\n title = "{COMET}: A Neural Framework for {MT} Evaluation",\n author = "Rei, Ricardo and\n Stewart, Craig and\n Farinha, Ana C and\n Lavie, Alon",\n booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",\n month = nov,\n year = "2020",\n address = "Online",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",\n pages = "2685--2702",\n}\n'
lowercase__ ='\\nCrosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM).\nWith the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.\n\nSee the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.\n'
lowercase__ ='\nCOMET score.\n\nArgs:\n\n`sources` (list of str): Source sentences\n`predictions` (list of str): candidate translations\n`references` (list of str): reference translations\n`cuda` (bool): If set to True, runs COMET using GPU\n`show_progress` (bool): Shows progress\n`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.\n\nReturns:\n `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.\n `scores`: List of scores.\n\nExamples:\n\n >>> comet_metric = datasets.load_metric(\'comet\')\n >>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use\n >>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]\n >>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]\n >>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]\n >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)\n >>> print([round(v, 2) for v in results["scores"]])\n [0.19, 0.92]\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
def lowerCAmelCase__ ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://unbabel.github.io/COMET/html/index.html""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""sources""": datasets.Value("""string""" , id="""sequence""" ),
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/Unbabel/COMET"""] , reference_urls=[
"""https://github.com/Unbabel/COMET""",
"""https://www.aclweb.org/anthology/2020.emnlp-main.213/""",
"""http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6""",
] , )
    def _download_and_prepare(self, dl_manager):
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da"))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))
    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {"src": sources, "mt": predictions, "ref": references}
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        scores, mean_score = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
| 263 | 1 |
from statistics import mean
import numpy as np
def calculate_turn_around_time(process_name, arrival_time, burst_time, no_of_process):
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Displays the finished process.
    # If it is 0, the performance is completed if it is 1, before the performance.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process
    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()
    while no_of_process > finished_process_count:
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]
        response_ratio = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        temp = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[
                    i
                ]
            if response_ratio < temp:
                response_ratio = temp
                loc = i
        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1
    return turn_around_time
def calculate_waiting_time(process_name, turn_around_time, burst_time, no_of_process):
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time
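# HRRN's selection key computed above is the response ratio (W + B) / B, where W
# is how long a ready process has waited and B is its burst time; the scheduler
# always runs the ready process with the highest ratio next.
# Hedged sanity sketch: a single process arriving at t=1 with burst 5 finishes
# at t=6, so its turn-around time is 5 and its waiting time 0.
assert calculate_turn_around_time(["P1"], [1], [5], 1) == [5]
assert calculate_waiting_time(["P1"], [5], [5], 1) == [0]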
if __name__ == "__main__":
A : Optional[int] = 5
A : str = ["A", "B", "C", "D", "E"]
A : List[str] = [1, 2, 3, 4, 5]
A : Tuple = [1, 2, 3, 4, 5]
A : Optional[int] = calculate_turn_around_time(
process_name, arrival_time, burst_time, no_of_process
)
A : Optional[int] = calculate_waiting_time(
process_name, turn_around_time, burst_time, no_of_process
)
print("Process name \tArrival time \tBurst time \tTurn around time \tWaiting time")
for i in range(0, no_of_process):
print(
f"{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t"
f"{turn_around_time[i]}\t\t\t{waiting_time[i]}"
)
print(f"average waiting time : {mean(waiting_time):.5f}")
print(f"average turn around time : {mean(turn_around_time):.5f}")
| 716 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
@slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10_269, 83, 99_942, 136, 60_742, 23, 70, 80_583, 18_276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
@slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10_269, 83, 99_942, 136, 60_742, 23, 70, 80_583, 18_276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1_024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 356 | 0 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()
    @abstractmethod
    def run(self):
        raise NotImplementedError()
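# Hedged sketch of a concrete subclass (the HelloCommand name and "hello"
# subcommand are invented for illustration; the two abstract hooks above are the
# actual contract every CLI command must satisfy):
# class HelloCommand(BaseTransformersCLICommand):
#     @staticmethod
#     def register_subcommand(parser: ArgumentParser):
#         hello_parser = parser.add_parser("hello")
#         hello_parser.set_defaults(func=lambda args: HelloCommand())
#     def run(self):
#         print("hello")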
| 79 |
import warnings
from ..trainer import Trainer
from ..utils import logging
__lowercase : str = logging.get_logger(__name__)
class SageMakerTrainer(Trainer):
    def __init__(self, args=None, **kwargs):
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
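# Migration note for the deprecation above: new code should construct the plain
# Trainer directly (e.g. `Trainer(args=training_args)`); this subclass only
# forwards its arguments unchanged.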
| 36 | 0 |
"""simple docstring"""
import os
SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
def parse_roman_numerals(numerals: str) -> int:
    """Sum a Roman numeral string, subtracting a symbol whenever a larger one follows it."""
    total_value = 0
    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value
def generate_roman_numerals(num: int) -> str:
    """Build the minimal Roman numeral representation of num."""
    numerals = ""
    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000
    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100
    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10
    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"
    return numerals
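# Hedged round-trip checks for the two converters above: a minimal numeral
# survives parse -> generate unchanged, and a non-minimal spelling ("VIIII" for 9)
# comes back in its shorter canonical form ("IX").
assert parse_roman_numerals("XIX") == 19
assert generate_roman_numerals(19) == "XIX"
assert generate_roman_numerals(parse_roman_numerals("VIIII")) == "IX"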
def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    """Project Euler 89: total characters saved by rewriting each numeral minimally."""
    savings = 0
    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()
    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shorter = generate_roman_numerals(num)
        savings += len(original) - len(shorter)
    return savings
if __name__ == "__main__":
    print(f"{solution() = }")
| 374 |
"""simple docstring"""
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    """Convert a TensorFlow LXMERT checkpoint to a PyTorch state dict."""
    config = LxmertConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = LxmertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
lowercase__ :Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
lowercase__ :Union[str, Any] = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 374 | 1 |
"""simple docstring"""
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize('''keep_in_memory''' , [False, True])
def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            "dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_sql_dataset(dataset, expected_features)
@require_sqlalchemy
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def test_dataset_from_sql_features(features, sqlite_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, features=features, cache_dir=cache_dir).read()
    _check_sql_dataset(dataset, expected_features)
def iter_sql_file(sqlite_path):
    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute("SELECT * FROM dataset")
        for row in cur:
            yield row
@require_sqlalchemy
def test_dataset_to_sql(sqlite_path, tmp_path):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=1).write()
    original_sql_iter = iter_sql_file(sqlite_path)
    expected_sql_iter = iter_sql_file(output_sqlite_path)
    for row1, row2 in zip(original_sql_iter, expected_sql_iter):
        assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=2).write()
    original_sql_iter = iter_sql_file(sqlite_path)
    expected_sql_iter = iter_sql_file(output_sqlite_path)
    for row1, row2 in zip(original_sql_iter, expected_sql_iter):
        assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    with pytest.raises(ValueError):
        SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=0).write()
| 52 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class snake_case_ ( __A ):
'''simple docstring'''
    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies
    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="pt")
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }
class snake_case_ ( __A ):
'''simple docstring'''
    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer
    def __call__(self, input_ids, scores, **kwargs):
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)
def remove_last_block(string):
    """Remove the last block of the code containing EOF_STRINGS."""
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])
def __a ( __UpperCAmelCase : List[str] , __UpperCAmelCase : Dict , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : Dict , __UpperCAmelCase : Optional[Any]=20 , **__UpperCAmelCase : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ : Dict = defaultdict(__UpperCAmelCase ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(__UpperCAmelCase ) ):
with torch.no_grad():
lowerCamelCase_ : List[Any] = batch["ids"].shape[-1]
lowerCamelCase_ : List[Any] = accelerator.unwrap_model(__UpperCAmelCase ).generate(
input_ids=batch["ids"][:, : batch["input_len"]] , num_return_sequences=__UpperCAmelCase , **__UpperCAmelCase )
# each task is generated batch_size times
lowerCamelCase_ : Optional[int] = batch["task_id"].repeat(__UpperCAmelCase )
lowerCamelCase_ : Tuple = accelerator.pad_across_processes(
__UpperCAmelCase , dim=1 , pad_index=tokenizer.pad_token_id )
lowerCamelCase_ , lowerCamelCase_ : Union[str, Any] = accelerator.gather((generated_tokens, generated_tasks) )
lowerCamelCase_ : Union[str, Any] = generated_tokens.cpu().numpy()
lowerCamelCase_ : Optional[int] = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(__UpperCAmelCase , __UpperCAmelCase ):
gen_token_dict[task].append(__UpperCAmelCase )
lowerCamelCase_ : List[Any] = [[] for _ in range(__UpperCAmelCase )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
lowerCamelCase_ : str = tokenizer.decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase , clean_up_tokenization_spaces=__UpperCAmelCase )
code_gens[task].append(remove_last_block(__UpperCAmelCase ) )
return code_gens
def __a ( ) -> str:
"""simple docstring"""
lowerCamelCase_ : Any = HfArgumentParser(__UpperCAmelCase )
lowerCamelCase_ : str = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
lowerCamelCase_ : List[str] = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
lowerCamelCase_ : Tuple = "false"
if args.num_workers is None:
lowerCamelCase_ : Optional[Any] = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
lowerCamelCase_ : List[Any] = Accelerator()
set_seed(args.seed , device_specific=__UpperCAmelCase )
# Load model and tokenizer
lowerCamelCase_ : Optional[Any] = AutoTokenizer.from_pretrained(args.model_ckpt )
lowerCamelCase_ : str = tokenizer.eos_token
lowerCamelCase_ : Optional[int] = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
lowerCamelCase_ : Any = {
"do_sample": args.do_sample,
"temperature": args.temperature,
"max_new_tokens": args.max_new_tokens,
"top_p": args.top_p,
"top_k": args.top_k,
"stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0 , __UpperCAmelCase , __UpperCAmelCase )] ),
}
# Load evaluation dataset and metric
lowerCamelCase_ : Optional[Any] = load_dataset("openai_humaneval" )
lowerCamelCase_ : List[str] = load_metric("code_eval" )
lowerCamelCase_ : Tuple = args.num_tasks if args.num_tasks is not None else len(human_eval["test"] )
lowerCamelCase_ : List[Any] = args.n_samples // args.batch_size
lowerCamelCase_ : str = TokenizedDataset(__UpperCAmelCase , human_eval["test"] , n_copies=__UpperCAmelCase , n_tasks=__UpperCAmelCase )
# do not confuse args.batch_size, which is actually the num_return_sequences
lowerCamelCase_ : Dict = DataLoader(__UpperCAmelCase , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
lowerCamelCase_ : Dict = code_eval_metric.compute(references=[""] , predictions=[[""]] )
except ValueError as exception:
print(
"Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`"
" flag to enable code evaluation." )
raise exception
lowerCamelCase_ , lowerCamelCase_ : str = accelerator.prepare(__UpperCAmelCase , __UpperCAmelCase )
lowerCamelCase_ : Any = complete_code(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , n_tasks=__UpperCAmelCase , batch_size=args.batch_size , **__UpperCAmelCase , )
if accelerator.is_main_process:
lowerCamelCase_ : Union[str, Any] = []
for task in tqdm(range(__UpperCAmelCase ) ):
lowerCamelCase_ : int = human_eval["test"][task]["test"]
lowerCamelCase_ : Any = f"check({human_eval['test'][task]['entry_point']})"
references.append("\n" + test_func + "\n" + entry_point )
# Evaluate completions with "code_eval" metric
lowerCamelCase_ , lowerCamelCase_ : Any = code_eval_metric.compute(
references=__UpperCAmelCase , predictions=__UpperCAmelCase , num_workers=args.num_workers )
print(f"Results: {pass_at_k}" )
# Save results to json file
with open(args.output_file , "w" ) as fp:
json.dump(__UpperCAmelCase , __UpperCAmelCase )
# For some reason the folliwng seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 488 | 0 |
'''simple docstring'''
from __future__ import annotations
def min_path_sum(matrix: list[list[int]]) -> int:
    """Minimum cost of a top-left to bottom-right path moving only right or down."""
    # preprocessing the first row
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]
    # preprocessing the first column
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]
    # updating the path cost for current position
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])
    return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
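# Hedged spot check for the DP above (the classic "minimum path sum" grid):
# the cheapest right/down path is 1 -> 3 -> 1 -> 1 -> 1 = 7. Note the function
# mutates its argument in place.
assert min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7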
| 435 |
'''simple docstring'''
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def create_components():
    model = torch.nn.Linear(2, 4)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)
    scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=2, epochs=1)
    train_dl = DataLoader(TensorDataset(torch.tensor([1, 2, 3])))
    valid_dl = DataLoader(TensorDataset(torch.tensor([4, 5, 6])))
    return model, optimizer, scheduler, train_dl, valid_dl
def get_signature(model):
    return (model.weight.abs().sum() + model.bias.abs().sum()).item()
def load_random_weights(model):
    state = torch.nn.Linear(*tuple(model.weight.T.shape)).state_dict()
    model.load_state_dict(state)
class UpperCAmelCase_ ( lowerCamelCase_ ):
"""simple docstring"""
@require_cuda
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase : Tuple = Accelerator()
assert PartialState._shared_state["_cpu"] is False
assert PartialState._shared_state["device"].type == "cuda"
with self.assertRaises(lowerCamelCase ):
UpperCamelCase : int = Accelerator(cpu=lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
'''simple docstring'''
UpperCamelCase : Dict = Accelerator()
UpperCamelCase : Any = GradientState()
assert state.num_steps == 1
UpperCamelCase : List[Any] = 4
assert state.num_steps == 4
assert state.sync_gradients is True
UpperCamelCase : Optional[int] = False
assert state.sync_gradients is False
GradientState._reset_state()
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase : Optional[int] = Accelerator()
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : Optional[Any] = create_components()
(
(
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) ,
) : Any = accelerator.prepare(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
self.assertTrue(prepared_model in accelerator._models )
self.assertTrue(prepared_optimizer in accelerator._optimizers )
self.assertTrue(prepared_scheduler in accelerator._schedulers )
self.assertTrue(prepared_train_dl in accelerator._dataloaders )
self.assertTrue(prepared_valid_dl in accelerator._dataloaders )
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
'''simple docstring'''
UpperCamelCase : Tuple = Accelerator()
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : Any = create_components()
accelerator.prepare(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
accelerator.free_memory()
self.assertTrue(len(accelerator._models ) == 0 )
self.assertTrue(len(accelerator._optimizers ) == 0 )
self.assertTrue(len(accelerator._schedulers ) == 0 )
self.assertTrue(len(accelerator._dataloaders ) == 0 )
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
'''simple docstring'''
PartialState._reset_state()
# Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
def noop(*lowerCamelCase , **lowerCamelCase ):
pass
with patch("torch.cuda.set_device" , lowerCamelCase ), patch_environment(ACCELERATE_TORCH_DEVICE="cuda:64" ):
UpperCamelCase : Union[str, Any] = Accelerator()
self.assertEqual(str(accelerator.state.device ) , "cuda:64" )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase : int = Accelerator()
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : Optional[int] = create_components()
accelerator.prepare(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
UpperCamelCase : str = get_signature(lowerCamelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(lowerCamelCase )
# make sure random weights don't match
load_random_weights(lowerCamelCase )
self.assertTrue(abs(model_signature - get_signature(lowerCamelCase ) ) > 1e-3 )
# make sure loaded weights match
accelerator.load_state(lowerCamelCase )
self.assertTrue(abs(model_signature - get_signature(lowerCamelCase ) ) < 1e-3 )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase : Optional[int] = Accelerator()
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : int = create_components()
accelerator.prepare(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
UpperCamelCase : List[Any] = get_signature(lowerCamelCase )
# saving hook
def save_config(lowerCamelCase , lowerCamelCase , lowerCamelCase ):
UpperCamelCase : str = {"class_name": models[0].__class__.__name__}
with open(os.path.join(lowerCamelCase , "data.json" ) , "w" ) as f:
json.dump(lowerCamelCase , lowerCamelCase )
# loading hook
def load_config(lowerCamelCase , lowerCamelCase ):
with open(os.path.join(lowerCamelCase , "data.json" ) , "r" ) as f:
UpperCamelCase : Optional[int] = json.load(lowerCamelCase )
UpperCamelCase : int = config["class_name"]
UpperCamelCase : Dict = accelerator.register_save_state_pre_hook(lowerCamelCase )
UpperCamelCase : Union[str, Any] = accelerator.register_load_state_pre_hook(lowerCamelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(lowerCamelCase )
# make sure random weights don't match with hooks
load_random_weights(lowerCamelCase )
self.assertTrue(abs(model_signature - get_signature(lowerCamelCase ) ) > 1e-3 )
# random class name to verify correct one is loaded
UpperCamelCase : Union[str, Any] = "random"
# make sure loaded weights match with hooks
accelerator.load_state(lowerCamelCase )
self.assertTrue(abs(model_signature - get_signature(lowerCamelCase ) ) < 1e-3 )
# mode.class_name is loaded from config
self.assertTrue(model.class_name == model.__class__.__name__ )
# remove hooks
save_hook.remove()
load_hook.remove()
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(lowerCamelCase )
# make sure random weights don't match with hooks removed
load_random_weights(lowerCamelCase )
self.assertTrue(abs(model_signature - get_signature(lowerCamelCase ) ) > 1e-3 )
# random class name to verify correct one is loaded
UpperCamelCase : Any = "random"
# make sure loaded weights match with hooks removed
accelerator.load_state(lowerCamelCase )
self.assertTrue(abs(model_signature - get_signature(lowerCamelCase ) ) < 1e-3 )
# mode.class_name is NOT loaded from config
self.assertTrue(model.class_name != model.__class__.__name__ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
'''simple docstring'''
UpperCamelCase : Optional[int] = Accelerator()
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : List[str] = create_components()
UpperCamelCase : int = None
# This should work
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : List[str] = accelerator.prepare(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
self.assertTrue(dummy_obj is None )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase : List[str] = Accelerator()
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : List[Any] = create_components()
UpperCamelCase : Union[str, Any] = [1, 2, 3]
# This should work
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : Tuple = accelerator.prepare(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
self.assertEqual(
getattr(lowerCamelCase , "_is_accelerate_prepared" , lowerCamelCase ) , lowerCamelCase , "Dummy object should have `_is_accelerate_prepared` set to `True`" , )
self.assertEqual(
getattr(lowerCamelCase , "_is_accelerate_prepared" , lowerCamelCase ) , lowerCamelCase , "Model is missing `_is_accelerator_prepared` or is set to `False`" , )
self.assertEqual(
getattr(lowerCamelCase , "_is_accelerate_prepared" , lowerCamelCase ) , lowerCamelCase , "Optimizer is missing `_is_accelerator_prepared` or is set to `False`" , )
self.assertEqual(
getattr(lowerCamelCase , "_is_accelerate_prepared" , lowerCamelCase ) , lowerCamelCase , "Scheduler is missing `_is_accelerator_prepared` or is set to `False`" , )
self.assertEqual(
getattr(lowerCamelCase , "_is_accelerate_prepared" , lowerCamelCase ) , lowerCamelCase , "Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`" , )
self.assertEqual(
getattr(lowerCamelCase , "_is_accelerate_prepared" , lowerCamelCase ) , lowerCamelCase , "Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`" , )
@slow
@require_bnb
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
'''simple docstring'''
from transformers import AutoModelForCausalLM
UpperCamelCase : Dict = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , load_in_abit=lowerCamelCase , device_map={"": 0} , )
UpperCamelCase : str = Accelerator()
# This should work
UpperCamelCase : Any = accelerator.prepare(lowerCamelCase )
@slow
@require_bnb
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
'''simple docstring'''
from transformers import AutoModelForCausalLM
UpperCamelCase : Optional[Any] = Accelerator()
with init_empty_weights():
UpperCamelCase : Optional[Any] = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , )
model.tie_weights()
UpperCamelCase : Optional[int] = infer_auto_device_map(lowerCamelCase )
UpperCamelCase : Dict = "cpu"
UpperCamelCase : int = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , device_map=lowerCamelCase , load_in_abit=lowerCamelCase , llm_inta_enable_fpaa_cpu_offload=lowerCamelCase )
# This should not work and get value error
with self.assertRaises(lowerCamelCase ):
UpperCamelCase : Dict = accelerator.prepare(lowerCamelCase )
@slow
@require_bnb
@require_multi_gpu
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
'''simple docstring'''
from transformers import AutoModelForCausalLM
UpperCamelCase : Dict = {"distributed_type": DistributedType.MULTI_GPU}
with init_empty_weights():
UpperCamelCase : Dict = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , )
model.tie_weights()
UpperCamelCase : List[Any] = infer_auto_device_map(lowerCamelCase )
UpperCamelCase : Any = 1
UpperCamelCase : Optional[int] = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , load_in_abit=lowerCamelCase , device_map=lowerCamelCase , )
UpperCamelCase : Optional[int] = Accelerator()
# This should not work and get value error
with self.assertRaises(lowerCamelCase ):
UpperCamelCase : Dict = accelerator.prepare(lowerCamelCase )
PartialState._reset_state()
@slow
@require_bnb
@require_multi_gpu
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
'''simple docstring'''
from transformers import AutoModelForCausalLM
with init_empty_weights():
UpperCamelCase : str = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , )
UpperCamelCase : Union[str, Any] = infer_auto_device_map(lowerCamelCase )
UpperCamelCase : Tuple = 1
UpperCamelCase : List[str] = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , load_in_abit=lowerCamelCase , device_map=lowerCamelCase , )
UpperCamelCase : Tuple = Accelerator()
# This should work
UpperCamelCase : Optional[Any] = accelerator.prepare(lowerCamelCase )
@require_cuda
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
'''simple docstring'''
UpperCamelCase : int = torch.nn.Linear(10 , 10 )
UpperCamelCase : Dict = torch.optim.SGD(model.parameters() , lr=0.01 )
UpperCamelCase : Optional[Any] = Accelerator(cpu=lowerCamelCase )
UpperCamelCase : Any = accelerator.prepare(lowerCamelCase )
| 435 | 1 |
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class __SCREAMING_SNAKE_CASE ( A__ ):
@slow
@require_torch
def __lowerCamelCase ( self ):
lowercase : Optional[Any] = EncoderDecoderModel.from_encoder_decoder_pretrained('''prajjwal1/bert-tiny''' , '''prajjwal1/bert-tiny''' )
lowercase : List[Any] = BertTokenizer.from_pretrained('''bert-base-uncased''' )
lowercase : int = bertabert.config.encoder.vocab_size
lowercase : Dict = tokenizer.sep_token_id
lowercase : List[Any] = tokenizer.cls_token_id
lowercase : Optional[Any] = 128
lowercase : Any = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''train[:1%]''' )
lowercase : List[Any] = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''validation[:1%]''' )
lowercase : int = train_dataset.select(range(32 ) )
lowercase : Any = val_dataset.select(range(16 ) )
lowercase : int = 4
def _map_to_encoder_decoder_inputs(SCREAMING_SNAKE_CASE__ ):
# Tokenizer will automatically set [BOS] <text> [EOS]
lowercase : str = tokenizer(batch['''article'''] , padding='''max_length''' , truncation=SCREAMING_SNAKE_CASE__ , max_length=512 )
lowercase : int = tokenizer(batch['''highlights'''] , padding='''max_length''' , truncation=SCREAMING_SNAKE_CASE__ , max_length=128 )
lowercase : Tuple = inputs.input_ids
lowercase : Any = inputs.attention_mask
lowercase : Optional[Any] = outputs.input_ids
lowercase : Optional[Any] = outputs.input_ids.copy()
lowercase : Union[str, Any] = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['''labels''']
]
lowercase : Dict = outputs.attention_mask
assert all(len(SCREAMING_SNAKE_CASE__ ) == 512 for x in inputs.input_ids )
assert all(len(SCREAMING_SNAKE_CASE__ ) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(SCREAMING_SNAKE_CASE__ ):
lowercase : Optional[Any] = pred.label_ids
lowercase : Any = pred.predictions
# all unnecessary tokens are removed
lowercase : Optional[int] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ , skip_special_tokens=SCREAMING_SNAKE_CASE__ )
lowercase : Union[str, Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ , skip_special_tokens=SCREAMING_SNAKE_CASE__ )
lowercase : Union[str, Any] = sum([int(pred_str[i] == label_str[i] ) for i in range(len(SCREAMING_SNAKE_CASE__ ) )] ) / len(SCREAMING_SNAKE_CASE__ )
return {"accuracy": accuracy}
# map train dataset
lowercase : Optional[Any] = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=SCREAMING_SNAKE_CASE__ , batch_size=SCREAMING_SNAKE_CASE__ , remove_columns=['''article''', '''highlights'''] , )
train_dataset.set_format(
type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , )
# same for validation dataset
lowercase : Dict = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=SCREAMING_SNAKE_CASE__ , batch_size=SCREAMING_SNAKE_CASE__ , remove_columns=['''article''', '''highlights'''] , )
val_dataset.set_format(
type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , )
lowercase : str = self.get_auto_remove_tmp_dir()
lowercase : Tuple = SeqaSeqTrainingArguments(
output_dir=SCREAMING_SNAKE_CASE__ , per_device_train_batch_size=SCREAMING_SNAKE_CASE__ , per_device_eval_batch_size=SCREAMING_SNAKE_CASE__ , predict_with_generate=SCREAMING_SNAKE_CASE__ , evaluation_strategy='''steps''' , do_train=SCREAMING_SNAKE_CASE__ , do_eval=SCREAMING_SNAKE_CASE__ , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
lowercase : List[Any] = SeqaSeqTrainer(
model=SCREAMING_SNAKE_CASE__ , args=SCREAMING_SNAKE_CASE__ , compute_metrics=_compute_metrics , train_dataset=SCREAMING_SNAKE_CASE__ , eval_dataset=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ , )
# start training
trainer.train()
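# Note on the label masking above: pad positions are replaced with -100 because
# PyTorch's CrossEntropyLoss defaults to ignore_index=-100, so padding tokens
# contribute nothing to the seq2seq loss.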
| 319 |
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class __SCREAMING_SNAKE_CASE :
    @property
    def dummy_input(self):
        return self.get_dummy_input()
    @property
    def output_shape(self):
        if self.block_type == "down":
            return (4, 32, 16, 16)
        elif self.block_type == "mid":
            return (4, 32, 32, 32)
        elif self.block_type == "up":
            return (4, 32, 64, 64)
        raise ValueError(f"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.")
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=False , ):
lowercase : Optional[int] = 4
lowercase : Dict = 32
lowercase : List[str] = (32, 32)
lowercase : Optional[int] = torch.manual_seed(0 )
lowercase : Optional[int] = torch.device(SCREAMING_SNAKE_CASE__ )
lowercase : int = (batch_size, num_channels) + sizes
lowercase : str = randn_tensor(SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , device=SCREAMING_SNAKE_CASE__ )
lowercase : Optional[Any] = {'''hidden_states''': hidden_states}
if include_temb:
lowercase : List[Any] = 128
lowercase : List[Any] = randn_tensor((batch_size, temb_channels) , generator=SCREAMING_SNAKE_CASE__ , device=SCREAMING_SNAKE_CASE__ )
if include_res_hidden_states_tuple:
lowercase : List[Any] = torch.manual_seed(1 )
lowercase : Optional[Any] = (randn_tensor(SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , device=SCREAMING_SNAKE_CASE__ ),)
if include_encoder_hidden_states:
lowercase : Optional[Any] = floats_tensor((batch_size, 32, 32) ).to(SCREAMING_SNAKE_CASE__ )
if include_skip_sample:
lowercase : Dict = randn_tensor(((batch_size, 3) + sizes) , generator=SCREAMING_SNAKE_CASE__ , device=SCREAMING_SNAKE_CASE__ )
return dummy_input
def __lowerCamelCase ( self ):
lowercase : Optional[int] = {
'''in_channels''': 32,
'''out_channels''': 32,
'''temb_channels''': 128,
}
if self.block_type == "up":
lowercase : Optional[int] = 32
if self.block_type == "mid":
init_dict.pop('''out_channels''' )
lowercase : Union[str, Any] = self.dummy_input
return init_dict, inputs_dict
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
lowercase , lowercase : str = self.prepare_init_args_and_inputs_for_common()
lowercase : List[str] = self.block_class(**SCREAMING_SNAKE_CASE__ )
unet_block.to(SCREAMING_SNAKE_CASE__ )
unet_block.eval()
with torch.no_grad():
lowercase : Tuple = unet_block(**SCREAMING_SNAKE_CASE__ )
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : Dict = output[0]
self.assertEqual(output.shape , self.output_shape )
lowercase : Optional[Any] = output[0, -1, -3:, -3:]
lowercase : Optional[int] = torch.tensor(SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ )
assert torch_all_close(output_slice.flatten() , SCREAMING_SNAKE_CASE__ , atol=5E-3 )
@unittest.skipIf(torch_device == '''mps''' , '''Training is not supported in mps''' )
def __lowerCamelCase ( self ):
lowercase , lowercase : Dict = self.prepare_init_args_and_inputs_for_common()
lowercase : Optional[int] = self.block_class(**SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.train()
lowercase : Optional[Any] = model(**SCREAMING_SNAKE_CASE__ )
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : Any = output[0]
lowercase : int = torch.device(SCREAMING_SNAKE_CASE__ )
lowercase : Union[str, Any] = randn_tensor(output.shape , device=SCREAMING_SNAKE_CASE__ )
lowercase : Union[str, Any] = torch.nn.functional.mse_loss(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
loss.backward()
| 319 | 1 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __snake_case :
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):  # method names in this class are recovered from their call sites
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size )

        config = self.get_config()

        return config, pixel_values, labels
    def get_config(self):  # name recovered from the call site above
"""simple docstring"""
return ViTConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=A, initializer_range=self.initializer_range, )
    def create_and_check_model(self, config, pixel_values, labels):  # name recovered from its call site below
        """simple docstring"""
        model = TFViTModel(config=config )
        result = model(pixel_values, training=False )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )

        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False )
        seq_length = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, seq_length, self.hidden_size) )
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """simple docstring"""
        config.num_labels = self.type_sequence_label_size
        model = TFViTForImageClassification(config )
        result = model(pixel_values, labels=labels, training=False )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )

        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )

        # test greyscale images
        config.num_channels = 1
        model = TFViTForImageClassification(config )
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFViTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):  # name/bases assumed from the imports above
_lowerCAmelCase = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
_lowerCAmelCase = (
{'''feature-extraction''': TFViTModel, '''image-classification''': TFViTForImageClassification}
if is_tf_available()
else {}
)
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase : List[str] = TFViTModelTester(self )
lowerCamelCase : Optional[int] = ConfigTester(self, config_class=A, has_text_modality=A, hidden_size=37 )
def UpperCAmelCase_ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds' )
def UpperCAmelCase_ ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='ViT does not use inputs_embeds' )
def UpperCAmelCase_ ( self ):
"""simple docstring"""
pass
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase , lowerCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase : Optional[Any] = model_class(A )
self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer) )
lowerCamelCase : List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A, tf.keras.layers.Layer ) )
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase , lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase : Optional[int] = model_class(A )
lowerCamelCase : str = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase : Optional[int] = [*signature.parameters.keys()]
lowerCamelCase : Tuple = ['pixel_values']
self.assertListEqual(arg_names[:1], A )
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A )
@slow
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase : Optional[Any] = TFViTModel.from_pretrained('google/vit-base-patch16-224' )
self.assertIsNotNone(A )
def UpperCAmelCase ( ):
lowerCamelCase : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
return image
@require_tf
@require_vision
class __snake_case ( unittest.TestCase):
@cached_property
def UpperCAmelCase_ ( self ):
"""simple docstring"""
return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224' ) if is_vision_available() else None
@slow
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase : str = TFViTForImageClassification.from_pretrained('google/vit-base-patch16-224' )
lowerCamelCase : List[Any] = self.default_image_processor
lowerCamelCase : Tuple = prepare_img()
lowerCamelCase : Optional[int] = image_processor(images=A, return_tensors='tf' )
# forward pass
lowerCamelCase : List[Any] = model(**A )
# verify the logits
lowerCamelCase : List[str] = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape, A )
lowerCamelCase : List[Any] = tf.constant([-0.2744, 0.8215, -0.0836] )
tf.debugging.assert_near(outputs.logits[0, :3], A, atol=1e-4 )
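# Illustrative note (added): `interpolate_pos_encoding=True`, exercised in the
# checks above, resizes the learned position embeddings so the model accepts
# resolutions other than the pretraining size, e.g. (sketch):
#
#     model = TFViTModel.from_pretrained("google/vit-base-patch16-224")
#     outputs = model(smaller_pixel_values, interpolate_pos_encoding=True)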
| 449 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    # Function, parameter and local names recovered from their use sites below.
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f"""Current {symbol:<4} stock price is {stock_price(symbol):>8}""")
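# Note (added): the CSS class above is whatever Yahoo Finance happens to render
# and can change without notice, so real use should guard the lookup, e.g.:
#
#     tag = soup.find("div", class_=class_)
#     price = tag.find("span").text if tag else "N/A"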
| 449 | 1 |
'''simple docstring'''
from __future__ import annotations
def pigeon_sort(array: list[int]) -> list[int]:
    # Function and variable names recovered from their use sites below.
    if len(array) == 0:
        return array

    _min, _max = min(array), max(array)

    # Compute the variables
    holes_range = _max - _min + 1
    holes, holes_repeat = [0] * holes_range, [0] * holes_range

    # Make the sorting.
    for i in array:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1

    # Makes the array back by replacing the numbers.
    index = 0
    for i in range(holes_range):
        while holes_repeat[i] > 0:
            array[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1

    # Returns the sorted array.
    return array


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by comma:\n")
    unsorted = [int(x) for x in user_input.split(",")]
    print(pigeon_sort(unsorted))
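# Worked example (added for illustration):
#     pigeon_sort([8, 3, 2, 7, 4, 6, 8]) -> [2, 3, 4, 6, 7, 8, 8]
# Here _min=2 and _max=8, so holes_range is 7; each value v lands in hole
# v - _min, and holes_repeat preserves duplicates (the two 8s above).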
| 270 |
'''simple docstring'''
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester:  # class name recovered from its use in the test class below
    def __init__(
        self,
        parent,
        vocab_size=100,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        out_indices=[0, 1, 2, 3],
    ):
        # Parameter names recovered from the attribute assignments below; the
        # original signature reused one placeholder name for every argument.
        self.parent = parent
        self.vocab_size = 100
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.num_labels = num_labels

        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):  # method names in this class are recovered from their call sites
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels
    def get_config(self):  # name recovered from the call site above
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def _lowerCAmelCase ( self : int , __lowerCamelCase : Dict , __lowerCamelCase : str , __lowerCamelCase : Tuple , __lowerCamelCase : int ):
snake_case__ : Tuple = BeitModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
snake_case__ : int = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCAmelCase ( self : Any , __lowerCamelCase : List[Any] , __lowerCamelCase : str , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[int] ):
snake_case__ : Optional[int] = BeitForMaskedImageModeling(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
snake_case__ : Dict = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def _lowerCAmelCase ( self : Dict , __lowerCamelCase : List[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any ):
snake_case__ : List[Any] = self.type_sequence_label_size
snake_case__ : int = BeitForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
snake_case__ : List[Any] = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
snake_case__ : Union[str, Any] = 1
snake_case__ : Any = BeitForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
snake_case__ : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case__ : str = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _lowerCAmelCase ( self : List[str] , __lowerCamelCase : Dict , __lowerCamelCase : Tuple , __lowerCamelCase : Any , __lowerCamelCase : List[Any] ):
snake_case__ : str = self.num_labels
snake_case__ : int = BeitForSemanticSegmentation(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
snake_case__ : List[Any] = model(__lowerCamelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
snake_case__ : Dict = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def _lowerCAmelCase ( self : List[Any] ):
snake_case__ : List[str] = self.prepare_config_and_inputs()
snake_case__ , snake_case__ , snake_case__ , snake_case__ : int = config_and_inputs
snake_case__ : Any = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):  # name/bases assumed from the imports above
A_ = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
A_ = (
{
"feature-extraction": BeitModel,
"image-classification": BeitForImageClassification,
"image-segmentation": BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
A_ = False
A_ = False
A_ = False
def _lowerCAmelCase ( self : Any ):
snake_case__ : Any = BeitModelTester(self )
snake_case__ : List[str] = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 )
def _lowerCAmelCase ( self : Dict ):
self.config_tester.run_common_tests()
@unittest.skip(reason='BEiT does not use inputs_embeds' )
def _lowerCAmelCase ( self : Tuple ):
pass
@require_torch_multi_gpu
@unittest.skip(reason='BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def _lowerCAmelCase ( self : Any ):
pass
def _lowerCAmelCase ( self : int ):
snake_case__ , snake_case__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : Optional[Any] = model_class(__lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
snake_case__ : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCamelCase , nn.Linear ) )
def _lowerCAmelCase ( self : int ):
snake_case__ , snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : str = model_class(__lowerCamelCase )
snake_case__ : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ : Union[str, Any] = [*signature.parameters.keys()]
snake_case__ : List[str] = ['pixel_values']
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def _lowerCAmelCase ( self : Optional[Any] ):
snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def _lowerCAmelCase ( self : List[str] ):
snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__lowerCamelCase )
def _lowerCAmelCase ( self : List[Any] ):
snake_case__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
def _lowerCAmelCase ( self : List[str] ):
snake_case__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__lowerCamelCase )
def _lowerCAmelCase ( self : Dict ):
if not self.model_tester.is_training:
return
snake_case__ , snake_case__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : str = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(__lowerCamelCase ), BeitForMaskedImageModeling]:
continue
snake_case__ : int = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.train()
snake_case__ : Dict = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
snake_case__ : Optional[Any] = model(**__lowerCamelCase ).loss
loss.backward()
def _lowerCAmelCase ( self : Any ):
snake_case__ , snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
snake_case__ : List[str] = False
snake_case__ : List[Any] = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(__lowerCamelCase ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
snake_case__ : Any = model_class(__lowerCamelCase )
model.gradient_checkpointing_enable()
model.to(__lowerCamelCase )
model.train()
snake_case__ : Optional[int] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
snake_case__ : int = model(**__lowerCamelCase ).loss
loss.backward()
def _lowerCAmelCase ( self : List[Any] ):
snake_case__ , snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : Tuple = _config_zero_init(__lowerCamelCase )
for model_class in self.all_model_classes:
snake_case__ : List[str] = model_class(config=__lowerCamelCase )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , )
@slow
def _lowerCAmelCase ( self : Union[str, Any] ):
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ : str = BeitModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def UpperCamelCase__ ( ) -> int:
snake_case__ : Any = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class lowercase_ ( unittest.TestCase ):
@cached_property
def _lowerCAmelCase ( self : List[str] ):
return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224' ) if is_vision_available() else None
@slow
def _lowerCAmelCase ( self : Dict ):
snake_case__ : Tuple = BeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k' ).to(__lowerCamelCase )
snake_case__ : Any = self.default_image_processor
snake_case__ : List[str] = prepare_img()
snake_case__ : Any = image_processor(images=__lowerCamelCase , return_tensors='pt' ).pixel_values.to(__lowerCamelCase )
# prepare bool_masked_pos
snake_case__ : Optional[Any] = torch.ones((1, 196) , dtype=torch.bool ).to(__lowerCamelCase )
# forward pass
with torch.no_grad():
snake_case__ : Union[str, Any] = model(pixel_values=__lowerCamelCase , bool_masked_pos=__lowerCamelCase )
snake_case__ : str = outputs.logits
# verify the logits
snake_case__ : Dict = torch.Size((1, 196, 8192) )
self.assertEqual(logits.shape , __lowerCamelCase )
snake_case__ : int = torch.tensor(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , __lowerCamelCase , atol=1E-2 ) )
@slow
def _lowerCAmelCase ( self : Union[str, Any] ):
snake_case__ : Optional[int] = BeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224' ).to(__lowerCamelCase )
snake_case__ : List[str] = self.default_image_processor
snake_case__ : int = prepare_img()
snake_case__ : Optional[Any] = image_processor(images=__lowerCamelCase , return_tensors='pt' ).to(__lowerCamelCase )
# forward pass
with torch.no_grad():
snake_case__ : Union[str, Any] = model(**__lowerCamelCase )
snake_case__ : Dict = outputs.logits
# verify the logits
snake_case__ : List[Any] = torch.Size((1, 1000) )
self.assertEqual(logits.shape , __lowerCamelCase )
        snake_case__ : Union[str, Any] = torch.tensor([-1.2385, -1.0987, -1.0108] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(logits[0, :3] , __lowerCamelCase , atol=1E-4 ) )
snake_case__ : Any = 281
self.assertEqual(logits.argmax(-1 ).item() , __lowerCamelCase )
@slow
def _lowerCAmelCase ( self : List[Any] ):
snake_case__ : Any = BeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k' ).to(
__lowerCamelCase )
snake_case__ : int = self.default_image_processor
snake_case__ : Tuple = prepare_img()
snake_case__ : List[Any] = image_processor(images=__lowerCamelCase , return_tensors='pt' ).to(__lowerCamelCase )
# forward pass
with torch.no_grad():
snake_case__ : Union[str, Any] = model(**__lowerCamelCase )
snake_case__ : Dict = outputs.logits
# verify the logits
snake_case__ : Optional[int] = torch.Size((1, 21841) )
self.assertEqual(logits.shape , __lowerCamelCase )
        snake_case__ : Dict = torch.tensor([1.6881, -0.2787, 0.5901] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(logits[0, :3] , __lowerCamelCase , atol=1E-4 ) )
snake_case__ : Optional[Any] = 2396
self.assertEqual(logits.argmax(-1 ).item() , __lowerCamelCase )
@slow
def _lowerCAmelCase ( self : Dict ):
snake_case__ : Optional[Any] = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
snake_case__ : str = model.to(__lowerCamelCase )
snake_case__ : List[str] = BeitImageProcessor(do_resize=__lowerCamelCase , size=640 , do_center_crop=__lowerCamelCase )
snake_case__ : Tuple = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' )
snake_case__ : Union[str, Any] = Image.open(ds[0]['file'] )
snake_case__ : Tuple = image_processor(images=__lowerCamelCase , return_tensors='pt' ).to(__lowerCamelCase )
# forward pass
with torch.no_grad():
snake_case__ : List[str] = model(**__lowerCamelCase )
snake_case__ : Any = outputs.logits
# verify the logits
snake_case__ : Optional[int] = torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape , __lowerCamelCase )
snake_case__ : Tuple = version.parse(PIL.__version__ ) < version.parse('9.0.0' )
if is_pillow_less_than_a:
            snake_case__ : Optional[Any] = torch.tensor(
                [
                    [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
                    [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
                    [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
                ] , device=__lowerCamelCase , )
else:
            snake_case__ : Union[str, Any] = torch.tensor(
                [
                    [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
                    [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
                    [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
                ] , device=__lowerCamelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __lowerCamelCase , atol=1E-4 ) )
@slow
def _lowerCAmelCase ( self : str ):
snake_case__ : Tuple = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
snake_case__ : int = model.to(__lowerCamelCase )
snake_case__ : Optional[Any] = BeitImageProcessor(do_resize=__lowerCamelCase , size=640 , do_center_crop=__lowerCamelCase )
snake_case__ : Tuple = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' )
snake_case__ : Optional[int] = Image.open(ds[0]['file'] )
snake_case__ : Optional[int] = image_processor(images=__lowerCamelCase , return_tensors='pt' ).to(__lowerCamelCase )
# forward pass
with torch.no_grad():
snake_case__ : Optional[int] = model(**__lowerCamelCase )
snake_case__ : Tuple = outputs.logits.detach().cpu()
snake_case__ : Any = image_processor.post_process_semantic_segmentation(outputs=__lowerCamelCase , target_sizes=[(500, 300)] )
snake_case__ : List[Any] = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , __lowerCamelCase )
snake_case__ : Tuple = image_processor.post_process_semantic_segmentation(outputs=__lowerCamelCase )
snake_case__ : Optional[Any] = torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape , __lowerCamelCase )
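# Usage sketch (added for illustration) of the post-processing exercised above:
# resampling the raw (1, 150, 160, 160) logits into a per-pixel label map.
#
#     segmentation = image_processor.post_process_semantic_segmentation(
#         outputs=outputs, target_sizes=[(500, 300)]
#     )[0]                      # tensor of shape (500, 300) holding class ids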
| 270 | 1 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
A: Tuple = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """simple docstring"""

    # Field names recovered from their uses in main() below.
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether to freeze the encoder."})
    freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."})
@dataclass
class DataTrainingArguments:
    """simple docstring"""

    # Field names recovered from their uses in main() below.
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} )
    task: Optional[str] = field(
        default="summarization",
        metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"}, )
    max_source_length: Optional[int] = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    max_target_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    val_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        }, )
    test_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."})
    n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."})
    n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."})
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."})
    tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."})
    eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."})
    ignore_pad_token_for_loss: bool = field(
        default=True,
        metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."}, )
def handle_metrics(split, metrics, output_dir):  # names recovered from the call sites below
    """simple docstring"""
    logger.info(f"***** {split} metrics *****" )
    for key in sorted(metrics.keys() ):
        logger.info(f"  {key} = {metrics[key]}" )
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json" ) )
def main():  # name recovered from the call sites at the bottom of the script
"""simple docstring"""
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments))

    if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    check_output_dir(training_args )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
    logger.info('Training/evaluation parameters %s' , training_args )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowercase_ : str = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
lowercase_ : Union[str, Any] = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout')
for p in extra_model_params:
if getattr(a , a , a ):
assert hasattr(a , a ), f"({config.__class__.__name__}) doesn't have a `{p}` attribute"
setattr(a , a , getattr(a , a ) )
lowercase_ : List[str] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
lowercase_ : Optional[Any] = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf='.ckpt' in model_args.model_name_or_path , config=a , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(a , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
lowercase_ : List[Any] = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(a , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(a , a ):
lowercase_ : Union[str, Any] = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
lowercase_ : Dict = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(a )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
lowercase_ : str = SeqaSeqDataset
# Get datasets
lowercase_ : Tuple = (
dataset_class(
a , type_path='train' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '' , )
if training_args.do_train
else None
)
lowercase_ : Optional[int] = (
dataset_class(
a , type_path='val' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '' , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
lowercase_ : Dict = (
dataset_class(
a , type_path='test' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '' , )
if training_args.do_predict
else None
)
# Initialize our Trainer
lowercase_ : List[str] = (
build_compute_metrics_fn(data_args.task , a ) if training_args.predict_with_generate else None
)
lowercase_ : Union[str, Any] = SeqaSeqTrainer(
model=a , args=a , data_args=a , train_dataset=a , eval_dataset=a , data_collator=SeqaSeqDataCollator(
a , a , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=a , tokenizer=a , )
lowercase_ : Tuple = {}
# Training
if training_args.do_train:
logger.info('*** Train ***' )
lowercase_ : str = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
lowercase_ : str = train_result.metrics
lowercase_ : List[str] = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics('train' , a , training_args.output_dir )
all_metrics.update(a )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , 'trainer_state.json' ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
lowercase_ : str = trainer.evaluate(metric_key_prefix='val' )
lowercase_ : Dict = data_args.n_val
lowercase_ : Optional[int] = round(metrics['val_loss'] , 4 )
if trainer.is_world_process_zero():
handle_metrics('val' , a , training_args.output_dir )
all_metrics.update(a )
if training_args.do_predict:
logger.info('*** Predict ***' )
lowercase_ : List[Any] = trainer.predict(test_dataset=a , metric_key_prefix='test' )
lowercase_ : Optional[Any] = test_output.metrics
lowercase_ : Union[str, Any] = data_args.n_test
if trainer.is_world_process_zero():
lowercase_ : Tuple = round(metrics['test_loss'] , 4 )
handle_metrics('test' , a , training_args.output_dir )
all_metrics.update(a )
if training_args.predict_with_generate:
lowercase_ : Tuple = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=a , clean_up_tokenization_spaces=a )
lowercase_ : Any = lmap(str.strip , a )
write_txt_file(a , os.path.join(training_args.output_dir , 'test_generations.txt' ) )
if trainer.is_world_process_zero():
save_json(a , os.path.join(training_args.output_dir , 'all_results.json' ) )
return all_metrics
def _mp_fn(index):  # assumed name: the conventional xla_spawn entry point (the original name was lost)
"""simple docstring"""
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
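# Typical invocation (added for illustration; the script name, paths and
# hyper-parameters are assumptions, not taken from this file):
#
#     python finetune_trainer.py \
#         --model_name_or_path t5-small \
#         --data_dir ./data/cnn_dm \
#         --output_dir ./output \
#         --do_train --do_eval --predict_with_generate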
| 7 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A: Dict = logging.get_logger(__name__)
A: Optional[Any] = {
"google/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json",
# See all ViT models at https://huggingface.co/models?filter=vit
}
class ViTConfig(PretrainedConfig):  # class name assumed from the module's `model_type` and archive map
    """simple docstring"""

    model_type = "vit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        # Parameter names recovered from the attribute assignments below.
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride
class ViTOnnxConfig(OnnxConfig):  # class name assumed; this is the standard ONNX-config companion class
    """simple docstring"""

    torch_onnx_minimum_version = version.parse("1.11" )

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ] )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
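# Usage sketch (added for illustration) of the two classes above:
#
#     config = ViTConfig()                  # defaults: 224px images, 16px patches
#     onnx_config = ViTOnnxConfig(config)
#     onnx_config.inputs                    # OrderedDict with dynamic pixel_values axes
#     onnx_config.atol_for_validation       # 1e-4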
| 7 | 1 |
"""simple docstring"""
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
A__ : List[Any] = logging.get_logger(__name__)
class SpeechTaFeatureExtractor(SequenceFeatureExtractor):  # class name assumed from the module contents (SpeechT5)
    model_input_names = ["input_values", "attention_mask"]
    def __init__(
        self,
        feature_size: int = 1,
        sampling_rate: int = 16000,
        padding_value: float = 0.0,
        do_normalize: bool = False,
        num_mel_bins: int = 80,
        hop_length: int = 16,
        win_length: int = 64,
        win_function: str = "hann_window",
        frame_signal_scale: float = 1.0,
        fmin: float = 80,
        fmax: float = 7600,
        mel_floor: float = 1e-10,
        reduction_factor: int = 2,
        return_attention_mask: bool = True,
        **kwargs,
    ):
        """simple docstring"""
        # Parameter names recovered from the attribute assignments and uses below.
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs )
        self.do_normalize = do_normalize
        self.return_attention_mask = return_attention_mask

        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.frame_signal_scale = frame_signal_scale
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.reduction_factor = reduction_factor

        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size )
        self.n_freqs = (self.n_fft // 2) + 1

        self.window = window_function(window_length=self.sample_size, name=self.win_function, periodic=True )

        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm='''slaney''' , mel_scale='''slaney''' , )
if frame_signal_scale != 1.0:
warnings.warn(
'''The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers''' , A_ , )
if reduction_factor != 2.0:
warnings.warn(
'''The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers''' , A_ , )
    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(input_values, attention_mask, padding_value=0.0) -> List[np.ndarray]:
        """simple docstring"""
        # Names recovered from the use sites below and the wav2vec2 original.
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32 )
            normed_input_values = []
            for vector, length in zip(input_values, attention_mask.sum(-1 ) ):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 )
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice )
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values]
        return normed_input_values
    def _extract_mel_features(self, one_waveform, ) -> np.ndarray:  # name recovered from its call site below
        """simple docstring"""
        log_mel_spec = spectrogram(
            one_waveform , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel='''log10''' , )
        return log_mel_spec.T
def __call__( self , A_ = None , A_ = None , A_ = False , A_ = None , A_ = False , A_ = None , A_ = None , A_ = None , A_ = None , **A_ , ) -> BatchFeature:
"""simple docstring"""
if audio is None and audio_target is None:
raise ValueError('''You must provide either `audio` or `audio_target` values.''' )
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
f''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'''
f''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'''It is strongly recommended to pass the ``sampling_rate`` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
if audio is not None:
_lowercase: Optional[Any] = self._process_audio(
A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , **A_ , )
else:
_lowercase: str = None
if audio_target is not None:
_lowercase: str = self._process_audio(
A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , **A_ , )
if inputs is None:
return inputs_target
else:
_lowercase: Union[str, Any] = inputs_target['''input_values''']
_lowercase: Tuple = inputs_target.get('''attention_mask''' )
if decoder_attention_mask is not None:
_lowercase: Any = decoder_attention_mask
return inputs
def lowercase_ ( self , A_ , A_ = False , A_ = False , A_ = None , A_ = False , A_ = None , A_ = None , A_ = None , **A_ , ) -> BatchFeature:
"""simple docstring"""
_lowercase: Optional[int] = isinstance(A_ , np.ndarray ) and len(speech.shape ) > 1
if is_batched_numpy and len(speech.shape ) > 2:
raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
_lowercase: Optional[Any] = is_batched_numpy or (
isinstance(A_ , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
_lowercase: List[str] = [np.asarray(A_ , dtype=np.floataa ) for speech in speech]
elif not is_batched and not isinstance(A_ , np.ndarray ):
_lowercase: Optional[int] = np.asarray(A_ , dtype=np.floataa )
elif isinstance(A_ , np.ndarray ) and speech.dtype is np.dtype(np.floataa ):
_lowercase: int = speech.astype(np.floataa )
# always return batch
if not is_batched:
_lowercase: Dict = [speech]
# needed to make pad() work on spectrogram inputs
_lowercase: Any = self.feature_size
# convert into correct format for padding
if is_target:
_lowercase: str = [self._extract_mel_features(A_ ) for waveform in speech]
_lowercase: int = BatchFeature({'''input_values''': features} )
_lowercase: Dict = self.num_mel_bins
else:
_lowercase: str = BatchFeature({'''input_values''': speech} )
_lowercase: Union[str, Any] = self.pad(
A_ , padding=A_ , max_length=A_ , truncation=A_ , pad_to_multiple_of=A_ , return_attention_mask=A_ , **A_ , )
_lowercase: Dict = feature_size_hack
# convert input values to correct format
_lowercase: Any = padded_inputs['''input_values''']
if not isinstance(input_values[0] , np.ndarray ):
_lowercase: Union[str, Any] = [np.asarray(A_ , dtype=np.floataa ) for array in input_values]
elif (
not isinstance(A_ , np.ndarray )
and isinstance(input_values[0] , np.ndarray )
and input_values[0].dtype is np.dtype(np.floataa )
):
_lowercase: Any = [array.astype(np.floataa ) for array in input_values]
elif isinstance(A_ , np.ndarray ) and input_values.dtype is np.dtype(np.floataa ):
_lowercase: List[str] = input_values.astype(np.floataa )
# convert attention_mask to correct format
_lowercase: Union[str, Any] = padded_inputs.get('''attention_mask''' )
if attention_mask is not None:
_lowercase: Optional[Any] = [np.asarray(A_ , dtype=np.intaa ) for array in attention_mask]
# zero-mean and unit-variance normalization
if not is_target and self.do_normalize:
_lowercase: str = (
attention_mask
if self._get_padding_strategies(A_ , max_length=A_ ) is not PaddingStrategy.DO_NOT_PAD
else None
)
_lowercase: Union[str, Any] = self.zero_mean_unit_var_norm(
padded_inputs['''input_values'''] , attention_mask=A_ , padding_value=self.padding_value )
if return_tensors is not None:
_lowercase: Dict = padded_inputs.convert_to_tensors(A_ )
return padded_inputs
def lowercase_ ( self ) -> Dict[str, Any]:
"""simple docstring"""
_lowercase: Union[str, Any] = super().to_dict()
# Don't serialize these as they are derived from the other properties.
_lowercase: List[str] = ['''window''', '''mel_filters''', '''sample_size''', '''sample_stride''', '''n_fft''', '''n_freqs''']
for name in names:
if name in output:
del output[name]
return output
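# Usage sketch (added for illustration): extracting log-mel targets with the
# extractor above. `waveform` is a placeholder 1-D float array.
#
#     fe = SpeechTaFeatureExtractor()
#     batch = fe(audio_target=waveform, sampling_rate=16000, return_tensors="np")
#     batch["input_values"].shape   # (1, n_frames, 80): one 80-bin log-mel frame
#                                   # per 16 ms hop over the input audio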
| 353 |
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:  # class name recovered from its use in the test class below
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        type_sequence_label_size=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        num_labels=3,
        scope=None,
    ):
        """simple docstring"""
        # Parameter names recovered from the attribute assignments below.
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = num_stages
    def prepare_config_and_inputs(self):  # method names in this class are recovered from their call sites
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )

        config = self.get_config()

        return config, pixel_values, labels
    def get_backbone_config(self):  # name recovered from the call in get_config below
"""simple docstring"""
return ConvNextConfig(
num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , )
    def get_config(self):  # name recovered from its call site in prepare_config_and_inputs
"""simple docstring"""
return UperNetConfig(
backbone_config=self.get_backbone_config() , hidden_size=512 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=A_ , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=A_ , loss_ignore_index=255 , num_labels=self.num_labels , )
def lowercase_ ( self , A_ , A_ , A_ ) -> int:
"""simple docstring"""
_lowercase: List[str] = UperNetForSemanticSegmentation(config=A_ )
model.to(A_ )
model.eval()
_lowercase: int = model(A_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):  # name/bases assumed from the imports above
UpperCamelCase_ = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
UpperCamelCase_ = {'''image-segmentation''': UperNetForSemanticSegmentation} if is_torch_available() else {}
UpperCamelCase_ = False
UpperCamelCase_ = False
UpperCamelCase_ = False
UpperCamelCase_ = False
UpperCamelCase_ = False
UpperCamelCase_ = False
def lowercase_ ( self ) -> Optional[int]:
"""simple docstring"""
_lowercase: List[Any] = UperNetModelTester(self )
_lowercase: Optional[Any] = ConfigTester(self , config_class=A_ , has_text_modality=A_ , hidden_size=37 )
def lowercase_ ( self ) -> List[str]:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase_ ( self ) -> Dict:
"""simple docstring"""
return
def lowercase_ ( self ) -> Dict:
"""simple docstring"""
_lowercase , _lowercase: Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase: Optional[Any] = model_class(A_ )
_lowercase: int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowercase: Dict = [*signature.parameters.keys()]
_lowercase: int = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , A_ )
def lowercase_ ( self ) -> Tuple:
"""simple docstring"""
_lowercase: List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*A_ )
@unittest.skip(reason='''UperNet does not use inputs_embeds''' )
def lowercase_ ( self ) -> str:
"""simple docstring"""
pass
@unittest.skip(reason='''UperNet does not support input and output embeddings''' )
def lowercase_ ( self ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip(reason='''UperNet does not have a base model''' )
def lowercase_ ( self ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip(reason='''UperNet does not have a base model''' )
def lowercase_ ( self ) -> Tuple:
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(reason='''UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def lowercase_ ( self ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def lowercase_ ( self ) -> List[Any]:
"""simple docstring"""
pass
def lowercase_ ( self ) -> str:
"""simple docstring"""
def check_hidden_states_output(A_ , A_ , A_ ):
_lowercase: Dict = model_class(A_ )
model.to(A_ )
model.eval()
with torch.no_grad():
_lowercase: List[str] = model(**self._prepare_for_class(A_ , A_ ) )
_lowercase: Tuple = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_lowercase: Union[str, Any] = self.model_tester.num_stages
self.assertEqual(len(A_ ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_lowercase , _lowercase: Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase: Optional[int] = True
check_hidden_states_output(A_ , A_ , A_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowercase: Union[str, Any] = True
check_hidden_states_output(A_ , A_ , A_ )
def lowercase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
_lowercase , _lowercase: Any = self.model_tester.prepare_config_and_inputs_for_common()
_lowercase: int = _config_zero_init(A_ )
_lowercase: Optional[Any] = _config_zero_init(configs_no_init.backbone_config )
for model_class in self.all_model_classes:
_lowercase: Any = model_class(config=A_ )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip(reason='''UperNet does not have tied weights''' )
def lowercase_ ( self ) -> List[str]:
"""simple docstring"""
pass
@slow
def lowercase_ ( self ) -> Tuple:
"""simple docstring"""
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase: Union[str, Any] = UperNetForSemanticSegmentation.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def _lowerCAmelCase ( ):
"""simple docstring"""
_lowercase: List[str] = hf_hub_download(
repo_id='''hf-internal-testing/fixtures_ade20k''' , repo_type='''dataset''' , filename='''ADE_val_00000001.jpg''' )
_lowercase: Optional[int] = Image.open(_UpperCamelCase ).convert('''RGB''' )
return image
@require_torch
@require_vision
@slow
class __magic_name__ ( unittest.TestCase ):
def lowercase_ ( self ) -> int:
"""simple docstring"""
_lowercase: Optional[Any] = AutoImageProcessor.from_pretrained('''openmmlab/upernet-swin-tiny''' )
_lowercase: Optional[Any] = UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-swin-tiny''' ).to(A_ )
_lowercase: List[Any] = prepare_img()
_lowercase: Any = processor(images=A_ , return_tensors='''pt''' ).to(A_ )
with torch.no_grad():
_lowercase: str = model(**A_ )
_lowercase: Union[str, Any] = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape , A_ )
_lowercase: Optional[Any] = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] ).to(A_ )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , A_ , atol=1E-4 ) )
def lowercase_ ( self ) -> int:
"""simple docstring"""
_lowercase: Tuple = AutoImageProcessor.from_pretrained('''openmmlab/upernet-convnext-tiny''' )
_lowercase: str = UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-convnext-tiny''' ).to(A_ )
_lowercase: Union[str, Any] = prepare_img()
_lowercase: Optional[Any] = processor(images=A_ , return_tensors='''pt''' ).to(A_ )
with torch.no_grad():
_lowercase: str = model(**A_ )
_lowercase: Optional[Any] = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape , A_ )
_lowercase: Tuple = torch.tensor(
[[-8.81_10, -8.81_10, -8.65_21], [-8.81_10, -8.81_10, -8.65_21], [-8.77_46, -8.77_46, -8.61_30]] ).to(A_ )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , A_ , atol=1E-4 ) )
| 353 | 1 |
def _UpperCamelCase ( lowerCAmelCase_ = 1_0_0_0 ) ->int:
return sum(e for e in range(3 , lowerCAmelCase_ ) if e % 3 == 0 or e % 5 == 0 )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 627 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"""ctrl""": """https://huggingface.co/ctrl/resolve/main/config.json"""}
class CTRLConfig(PretrainedConfig ):
    model_type = '''ctrl'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {
        '''max_position_embeddings''': '''n_positions''',
        '''hidden_size''': '''n_embd''',
        '''num_attention_heads''': '''n_head''',
        '''num_hidden_layers''': '''n_layer''',
    }
    def __init__( self , vocab_size=2_4_6_5_3_4 , n_positions=2_5_6 , n_embd=1_2_8_0 , dff=8_1_9_2 , n_layer=4_8 , n_head=1_6 , resid_pdrop=0.1 , embd_pdrop=0.1 , layer_norm_epsilon=1e-6 , initializer_range=0.02 , use_cache=True , **kwargs , ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        super().__init__(**kwargs )
| 627 | 1 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
class TvltProcessor( ProcessorMixin ):
    '''simple docstring'''

    attributes = ['image_processor', 'feature_extractor']
    image_processor_class = 'TvltImageProcessor'
    feature_extractor_class = 'TvltFeatureExtractor'
    def __init__(self , image_processor , feature_extractor ):
        '''simple docstring'''
        super().__init__(image_processor=image_processor , feature_extractor=feature_extractor )
        self.image_processor = image_processor
        self.feature_extractor = feature_extractor

    def __call__(self , images=None , audio=None , images_mixed=None , sampling_rate=None , mask_audio=False , mask_pixel=False , *args , **kwargs , ):
        '''simple docstring'''
        if images is None and audio is None:
            raise ValueError('You need to specify either an `images` or `audio` input to process.' )

        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images , mask_pixel=mask_pixel , *args , **kwargs )
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed , is_mixed=True , *args , **kwargs )
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio , *args , sampling_rate=sampling_rate , mask_audio=mask_audio , **kwargs )

        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict )
        if images is not None:
            output_dict.update(images_dict )
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict )
        return output_dict
@property
    def model_input_names(self ):
        '''simple docstring'''
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
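
# Hedged usage sketch of the processor above (video_frames/waveform are
# illustrative placeholders, not values from this file):
# processor = TvltProcessor(TvltImageProcessor(), TvltFeatureExtractor())
# batch = processor(images=video_frames, audio=waveform, sampling_rate=44_100)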
| 42 |
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)

_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}
def _register_formatter(formatter_cls: type , format_type: Optional[str] , aliases: Optional[List[str]] = None ) -> None:
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})" )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type] ):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})" )
        _FORMAT_TYPES_ALIASES[alias] = format_type


def _register_unavailable_formatter(unavailable_error: Exception , format_type: Optional[str] , aliases: Optional[List[str]] = None ) -> None:
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type] ):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['python'])
_register_formatter(ArrowFormatter, 'arrow', aliases=['pa', 'pyarrow'])
_register_formatter(NumpyFormatter, 'numpy', aliases=['np'])
_register_formatter(PandasFormatter, 'pandas', aliases=['pd'])
_register_formatter(CustomFormatter, 'custom')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, 'torch', aliases=['pt', 'pytorch'])
else:
    _torch_error = ValueError('PyTorch needs to be installed to be able to return PyTorch tensors.')
_register_unavailable_formatter(_torch_error, 'torch', aliases=['pt', 'pytorch'])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, 'tensorflow', aliases=['tf'])
else:
    _tf_error = ValueError('Tensorflow needs to be installed to be able to return Tensorflow tensors.')
_register_unavailable_formatter(_tf_error, 'tensorflow', aliases=['tf'])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, 'jax', aliases=[])
else:
    _jax_error = ValueError('JAX needs to be installed to be able to return JAX arrays.')
_register_unavailable_formatter(_jax_error, 'jax', aliases=[])
def get_format_type_from_alias(format_type: Optional[str] ) -> Optional[str]:
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type


def get_formatter(format_type: Optional[str] , **format_kwargs ) -> Formatter:
    format_type = get_format_type_from_alias(format_type )
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs )
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type is not None )}, but got '{format_type}'" )
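
# Hedged usage sketch of the registry above:
# get_formatter("np")      # alias resolves to the registered NumpyFormatter
# get_formatter(None)      # PythonFormatter for plain Python objects
# get_formatter("torch")   # re-raises the stored error when torch is absent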
| 311 | 0 |
'''simple docstring'''
def solution(limit: int = 1_0_0_0_0_0_0 ) -> int:
    primes = set(range(3 , limit , 2 ) )
    primes.add(2 )
    for p in range(3 , limit , 2 ):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p , limit , p ) ) )
    phi = [float(n ) for n in range(limit + 1 )]
    for p in primes:
        for n in range(p , limit + 1 , p ):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:] ) )
if __name__ == "__main__":
print(f"""{solution() = }""")
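    # Hedged quick check (Project Euler 72: there are 21 reduced proper fractions
    # with denominator d <= 8, i.e. sum of phi(2..8) == 21):
    assert solution(8) == 21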
| 709 |
'''simple docstring'''
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = '\\n@inproceedings{bleurt,\n title={BLEURT: Learning Robust Metrics for Text Generation},\n author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},\n booktitle={ACL},\n year={2020},\n url={https://arxiv.org/abs/2004.04696}\n}\n'
_DESCRIPTION = '\\nBLEURT is a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)\nand then employing another pre-training phase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune\nit for your specific application (the latter is expected to perform better).\n\nSee the project\'s README at https://github.com/google-research/bleurt#readme for more information.\n'
_KWARGS_DESCRIPTION = '\nBLEURT score.\n\nArgs:\n    `predictions` (list of str): prediction/candidate sentences\n    `references` (list of str): reference sentences\n    `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.\n\nReturns:\n    \'scores\': List of scores.\nExamples:\n\n    >>> predictions = ["hello there", "general kenobi"]\n    >>> references = ["hello there", "general kenobi"]\n    >>> bleurt = datasets.load_metric("bleurt")\n    >>> results = bleurt.compute(predictions=predictions, references=references)\n    >>> print([round(v, 2) for v in results["scores"]])\n    [1.03, 1.04]\n'
CHECKPOINT_URLS = {
'bleurt-tiny-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip',
'bleurt-tiny-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip',
'bleurt-base-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip',
'bleurt-base-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip',
'bleurt-large-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip',
'bleurt-large-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip',
'BLEURT-20-D3': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip',
'BLEURT-20-D6': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip',
'BLEURT-20-D12': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip',
'BLEURT-20': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip',
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class BLEURT(datasets.Metric ):
    def _info(self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/google-research/bleurt''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/google-research/bleurt'''] , reference_urls=['''https://github.com/google-research/bleurt''', '''https://arxiv.org/abs/2004.04696'''] , )
    def _download_and_prepare(self , dl_manager ):
        '''simple docstring'''
        # check that config name specifies a valid BLEURT model
        if self.config_name == "default":
            logger.warning(
                '''Using default BLEURT-Base checkpoint for sequence maximum length 128. '''
                '''You can use a bigger model for better results with e.g.: datasets.load_metric(\'bleurt\', \'bleurt-large-512\').''' )
            checkpoint_name = '''bleurt-base-128'''
        elif self.config_name.lower() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.upper()
        else:
            raise KeyError(
                f"{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}" )

        # download the model checkpoint specified by self.config_name and set up the scorer
        model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name] )
        self.scorer = score.BleurtScorer(os.path.join(model_path , checkpoint_name ) )

    def _compute(self , predictions , references ):
        '''simple docstring'''
        scores = self.scorer.score(references=references , candidates=predictions )
        return {"scores": scores}
| 358 | 0 |
from ....utils import logging
logger = logging.get_logger(__name__)


class MMBTConfig:
    def __init__( self , config , num_labels=None , modal_hidden_size=2_0_4_8 ):
        """simple docstring"""
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
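
# Hedged usage sketch (BertConfig is an assumed choice of wrapped config; any
# object exposing __dict__ works with the wrapper above):
# from transformers import BertConfig
# mmbt_config = MMBTConfig(BertConfig(), num_labels=2, modal_hidden_size=2_0_4_8)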
| 417 |
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def __lowercase ( lowerCamelCase : Optional[Any] ):
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class _lowercase ( nn.Module ):
def __init__( self : Optional[int] , snake_case : nn.Module , snake_case : int ) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
UpperCamelCase_ : str = module
UpperCamelCase_ : Optional[int] = nn.Sequential(
nn.Linear(module.in_features , snake_case , bias=snake_case ) , nn.Linear(snake_case , module.out_features , bias=snake_case ) , )
UpperCamelCase_ : Optional[int] = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
nn.init.normal_(self.adapter[0].weight , std=snake_case )
nn.init.zeros_(self.adapter[1].weight )
self.adapter.to(module.weight.device )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , snake_case : Dict , *snake_case : Any , **snake_case : Optional[Any] ) -> List[Any]:
"""simple docstring"""
return self.module(snake_case , *snake_case , **snake_case ) + self.adapter(snake_case )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _lowercase ( unittest.TestCase ):
# We keep the constants inside the init function and model loading inside setUp function
# We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected)
# Therefore here we use only bloom-1b3 to test our module
    model_name = 'bigscience/bloom-1b7'

    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109_6595_5269_2574

    input_text = 'Hello my name is'

    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add('Hello my name is John and I am a professional photographer. I' )
    EXPECTED_OUTPUTS.add('Hello my name is John.\nI am a friend of your father.\n' )
    EXPECTED_OUTPUTS.add('Hello my name is John Doe, I am a student at the University' )

    MAX_NEW_TOKENS = 1_0
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
UpperCamelCase_ : List[Any] = AutoTokenizer.from_pretrained(self.model_name )
class _lowercase ( snake_case_ ):
def SCREAMING_SNAKE_CASE__ ( self : str ) -> int:
"""simple docstring"""
super().setUp()
# Models and tokenizer
UpperCamelCase_ : List[Any] = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map='auto' )
UpperCamelCase_ : int = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=snake_case , device_map='auto' )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> List[str]:
"""simple docstring"""
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ : Optional[Any] = self.model_abit.config
self.assertTrue(hasattr(snake_case , 'quantization_config' ) )
UpperCamelCase_ : str = config.to_dict()
UpperCamelCase_ : List[Any] = config.to_diff_dict()
UpperCamelCase_ : Any = config.to_json_string()
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
from bitsandbytes.nn import Paramsabit
UpperCamelCase_ : Union[str, Any] = self.model_fpaa.get_memory_footprint()
UpperCamelCase_ : Optional[Any] = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
UpperCamelCase_ : Optional[Any] = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(snake_case , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ : Union[str, Any] = self.tokenizer(self.input_text , return_tensors='pt' )
UpperCamelCase_ : Tuple = self.model_abit.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=1_0 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=snake_case ) , self.EXPECTED_OUTPUTS )
def SCREAMING_SNAKE_CASE__ ( self : str ) -> str:
"""simple docstring"""
UpperCamelCase_ : Union[str, Any] = BitsAndBytesConfig()
UpperCamelCase_ : List[Any] = True
UpperCamelCase_ : Tuple = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=snake_case , device_map='auto' )
UpperCamelCase_ : Any = self.tokenizer(self.input_text , return_tensors='pt' )
UpperCamelCase_ : int = model_abit_from_config.generate(
input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=1_0 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=snake_case ) , self.EXPECTED_OUTPUTS )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
with self.assertRaises(snake_case ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(snake_case )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ : Optional[int] = BitsAndBytesConfig()
with self.assertRaises(snake_case ):
UpperCamelCase_ : List[Any] = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=snake_case , load_in_abit=snake_case , device_map='auto' , bnb_abit_quant_type='nf4' , )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
with self.assertRaises(snake_case ):
# Tries with `str`
self.model_abit.to('cpu' )
with self.assertRaises(snake_case ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(snake_case ):
# Tries with a `device`
self.model_abit.to(torch.device('cuda:0' ) )
with self.assertRaises(snake_case ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(snake_case ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
UpperCamelCase_ : Optional[int] = self.tokenizer(self.input_text , return_tensors='pt' )
UpperCamelCase_ : Dict = self.model_fpaa.to(torch.floataa )
UpperCamelCase_ : Optional[int] = self.model_fpaa.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=1_0 )
# Check this does not throw an error
UpperCamelCase_ : Union[str, Any] = self.model_fpaa.to('cpu' )
# Check this does not throw an error
UpperCamelCase_ : str = self.model_fpaa.half()
# Check this does not throw an error
UpperCamelCase_ : Optional[int] = self.model_fpaa.float()
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ : List[str] = AutoModelForSeqaSeqLM.from_pretrained('t5-small' , load_in_abit=snake_case , device_map='auto' )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _lowercase ( unittest.TestCase ):
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : List[str] ) -> str:
"""simple docstring"""
        cls.model_name = 't5-small'
        cls.dense_act_model_name = 'google/flan-t5-small'  # flan-t5 uses dense-act instead of dense-relu-dense
        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name )
        cls.input_text = 'Translate in German: Hello, my dog is cute'
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Optional[int]:
"""simple docstring"""
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
from transformers import TaForConditionalGeneration
UpperCamelCase_ : List[str] = TaForConditionalGeneration._keep_in_fpaa_modules
UpperCamelCase_ : Optional[Any] = None
# test with `t5-small`
UpperCamelCase_ : Optional[int] = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=snake_case , device_map='auto' )
UpperCamelCase_ : int = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
UpperCamelCase_ : str = model.generate(**snake_case )
# test with `flan-t5-small`
UpperCamelCase_ : Any = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=snake_case , device_map='auto' )
UpperCamelCase_ : int = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
UpperCamelCase_ : Dict = model.generate(**snake_case )
UpperCamelCase_ : int = modules
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
UpperCamelCase_ : Optional[int] = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=snake_case , device_map='auto' )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
UpperCamelCase_ : str = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
UpperCamelCase_ : Any = model.generate(**snake_case )
# test with `flan-t5-small`
UpperCamelCase_ : List[str] = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=snake_case , device_map='auto' )
UpperCamelCase_ : Optional[int] = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
UpperCamelCase_ : Optional[Any] = model.generate(**snake_case )
class _lowercase ( snake_case_ ):
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
super().setUp()
# model_name
UpperCamelCase_ : Union[str, Any] = 'bigscience/bloom-560m'
UpperCamelCase_ : List[Any] = 't5-small'
# Different types of model
UpperCamelCase_ : Any = AutoModel.from_pretrained(self.model_name , load_in_abit=snake_case , device_map='auto' )
# Sequence classification model
UpperCamelCase_ : List[Any] = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=snake_case , device_map='auto' )
# CausalLM model
UpperCamelCase_ : int = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=snake_case , device_map='auto' )
# Seq2seq model
UpperCamelCase_ : List[str] = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=snake_case , device_map='auto' )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> List[str]:
"""simple docstring"""
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class _lowercase ( snake_case_ ):
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
super().setUp()
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> str:
"""simple docstring"""
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ : List[Any] = pipeline(
'text-generation' , model=self.model_name , model_kwargs={'device_map': 'auto', 'load_in_4bit': True, 'torch_dtype': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
UpperCamelCase_ : Dict = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]['generated_text'] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class _lowercase ( snake_case_ ):
def SCREAMING_SNAKE_CASE__ ( self : int ) -> int:
"""simple docstring"""
super().setUp()
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Tuple:
"""simple docstring"""
UpperCamelCase_ : Tuple = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=snake_case , device_map='balanced' )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
UpperCamelCase_ : Optional[Any] = self.tokenizer(self.input_text , return_tensors='pt' )
# Second real batch
UpperCamelCase_ : List[str] = model_parallel.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=1_0 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=snake_case ) , self.EXPECTED_OUTPUTS )
class _lowercase ( snake_case_ ):
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ : List[str] = 'facebook/opt-350m'
super().setUp()
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Any:
"""simple docstring"""
if version.parse(importlib.metadata.version('bitsandbytes' ) ) < version.parse('0.37.0' ):
return
# Step 1: freeze all parameters
UpperCamelCase_ : int = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=snake_case )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
UpperCamelCase_ : str = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
UpperCamelCase_ : Dict = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(snake_case ) ):
UpperCamelCase_ : Any = LoRALayer(module.q_proj , rank=1_6 )
UpperCamelCase_ : Union[str, Any] = LoRALayer(module.k_proj , rank=1_6 )
UpperCamelCase_ : Optional[int] = LoRALayer(module.v_proj , rank=1_6 )
# Step 3: dummy batch
UpperCamelCase_ : Tuple = self.tokenizer('Test batch ' , return_tensors='pt' ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
UpperCamelCase_ : Optional[Any] = model.forward(**snake_case )
out.logits.norm().backward()
for module in model.modules():
if isinstance(snake_case , snake_case ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(snake_case , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class _lowercase ( snake_case_ ):
    model_name = 'gpt2-xl'
    EXPECTED_RELATIVE_DIFFERENCE = 3.3191_8548_5415_2187
| 417 | 1 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class QuestionAnsweringExtractive( TaskTemplate ):
    '''simple docstring'''

    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="question-answering-extractive" , metadata={"include_in_asdict_even_if_is_default": True} )
    input_schema: ClassVar[Features] = Features({"question": Value("string" ), "context": Value("string" )} )
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string" ),
                    "answer_start": Value("int32" ),
                } )
        } )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self ) -> Dict[str, str]:
        '''simple docstring'''
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
| 712 |
'''simple docstring'''
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """facebook/encodec_24khz""": """https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json""",
    """facebook/encodec_48khz""": """https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json""",
}
class EncodecConfig( PretrainedConfig ):
    model_type = "encodec"

    def __init__( self , target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0] , sampling_rate=24000 , audio_channels=1 , normalize=False , chunk_length_s=None , overlap=None , hidden_size=128 , num_filters=32 , num_residual_layers=1 , upsampling_ratios=[8, 5, 4, 2] , norm_type="weight_norm" , kernel_size=7 , last_kernel_size=7 , residual_kernel_size=3 , dilation_growth_rate=2 , use_causal_conv=True , pad_mode="reflect" , compress=2 , num_lstm_layers=2 , trim_right_ratio=1.0 , codebook_size=1024 , codebook_dim=None , use_conv_shortcut=True , **kwargs , ):
        """simple docstring"""
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut

        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                F"""self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}""" )

        super().__init__(**kwargs )

    @property
    def chunk_length(self ) -> Optional[int]:
        """simple docstring"""
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate )

    @property
    def chunk_stride(self ) -> Optional[int]:
        """simple docstring"""
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )

    @property
    def frame_rate(self ) -> int:
        """simple docstring"""
        hop_length = np.prod(self.upsampling_ratios )
        return math.ceil(self.sampling_rate / hop_length )

    @property
    def num_quantizers(self ) -> int:
        """simple docstring"""
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
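
# Hedged worked example with the defaults above: upsampling_ratios [8, 5, 4, 2]
# give hop_length = 8 * 5 * 4 * 2 = 320, so frame_rate = ceil(24000 / 320) = 75
# and num_quantizers = int(1000 * 24.0 // (75 * 10)) = 32.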
| 50 | 0 |
import math
import sys
def read_file_binary(file_path: str ) -> str:
    """simple docstring"""
    result = ''
    try:
        with open(file_path , 'rb' ) as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f'''{dat:08b}'''
            result += curr_byte
        return result
    except OSError:
        print('File not accessible' )
        sys.exit()


def decompress_data(data_bits: str ) -> str:
    """simple docstring"""
    lexicon = {'0': '0', '1': '1'}
    result, curr_string = '', ''
    index = len(lexicon )
    for i in range(len(data_bits ) ):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + '0'
        if math.log2(index ).is_integer():
            new_lex = {}
            for curr_key in list(lexicon ):
                new_lex['0' + curr_key] = lexicon.pop(curr_key )
            lexicon = new_lex
        lexicon[bin(index )[2:]] = last_match_id + '1'
        index += 1
        curr_string = ''
    return result


def write_file_binary(file_path: str , to_write: str ) -> None:
    """simple docstring"""
    byte_length = 8
    try:
        with open(file_path , 'wb' ) as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0 , len(to_write ) , byte_length )
            ]
            if len(result_byte_array[-1] ) % byte_length == 0:
                result_byte_array.append('10000000' )
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1] ) - 1
                )
            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem , 2 ).to_bytes(1 , byteorder='big' ) )
    except OSError:
        print('File not accessible' )
        sys.exit()


def remove_prefix(data_bits: str ) -> str:
    """simple docstring"""
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1
    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits


def compress(source_path: str , destination_path: str ) -> None:
    """simple docstring"""
    data_bits = read_file_binary(source_path )
    data_bits = remove_prefix(data_bits )
    decompressed = decompress_data(data_bits )
    write_file_binary(destination_path , decompressed )
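
# Hedged sanity check of the decoder alone (hand-traced on this implementation):
# the seed lexicon maps "1" -> "1", so the one-bit stream "1" decodes to "1".
assert decompress_data("1") == "1"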
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 1 |
import fire

from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer


def save_randomly_initialized_version(config_name: str , save_dir: str , **config_kwargs ):
    cfg = AutoConfig.from_pretrained(config_name , **config_kwargs )
    model = AutoModelForSeq2SeqLM.from_config(cfg )
    model.save_pretrained(save_dir )
    AutoTokenizer.from_pretrained(config_name ).save_pretrained(save_dir )
    return model


if __name__ == "__main__":
    fire.Fire(save_randomly_initialized_version)
| 140 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_altclip': [
'ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AltCLIPConfig',
'AltCLIPTextConfig',
'AltCLIPVisionConfig',
],
'processing_altclip': ['AltCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_altclip'] = [
'ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'AltCLIPPreTrainedModel',
'AltCLIPModel',
'AltCLIPTextModel',
'AltCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 174 |
'''simple docstring'''
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003


def rabin_karp(pattern: str , text: str ) -> bool:
    p_len = len(pattern )
    t_len = len(text )
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len ):
        p_hash = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i] ) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0 , t_len - p_len + 1 ):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i] ) * modulus_power) * alphabet_size
            + ord(text[i + p_len] )
        ) % modulus
    return False


def test_rabin_karp() -> None:
    # Test 1)
    pattern = '''abc1abc12'''
    text_a = '''alskfjaldsabc1abc1abc12k23adsfabcabc'''
    text_b = '''alskfjaldsk23adsfabcabc'''
    assert rabin_karp(pattern , text_a ) and not rabin_karp(pattern , text_b )

    # Test 2)
    pattern = '''ABABX'''
    text = '''ABABZABABYABABX'''
    assert rabin_karp(pattern , text )

    # Test 3)
    pattern = '''AAAB'''
    text = '''ABAAAAAB'''
    assert rabin_karp(pattern , text )

    # Test 4)
    pattern = '''abcdabcy'''
    text = '''abcxabcdabxabcdabcdabcy'''
    assert rabin_karp(pattern , text )

    # Test 5)
    pattern = '''Lü'''
    text = '''Lüsai'''
    assert rabin_karp(pattern , text )
    pattern = '''Lue'''
    assert not rabin_karp(pattern , text )
    print('''Success.''' )
if __name__ == "__main__":
test_rabin_karp()
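    # Hedged extra usage demo beyond the unit checks above:
    assert rabin_karp("abc", "zabcy")
    assert not rabin_karp("abd", "zabcy")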
| 174 | 1 |
from __future__ import annotations
__a = tuple[int, int, int]
__a = tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
__a = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
# -------------------------- default selection --------------------------
# rotors --------------------------
__a = 'EGZWVONAHDCLFQMSIPJBYUKXTR'
__a = 'FOBHMDKEXQNRAULPGSJVTYICZW'
__a = 'ZJXESIUQLHAVRMDOYGTNFWPBKC'
# reflector --------------------------
__a = {
'A': 'N',
'N': 'A',
'B': 'O',
'O': 'B',
'C': 'P',
'P': 'C',
'D': 'Q',
'Q': 'D',
'E': 'R',
'R': 'E',
'F': 'S',
'S': 'F',
'G': 'T',
'T': 'G',
'H': 'U',
'U': 'H',
'I': 'V',
'V': 'I',
'J': 'W',
'W': 'J',
'K': 'X',
'X': 'K',
'L': 'Y',
'Y': 'L',
'M': 'Z',
'Z': 'M',
}
# -------------------------- extra rotors --------------------------
__a = 'RMDJXFUWGISLHVTCQNKYPBEZOA'
__a = 'SGLCPQWZHKXAREONTFBVIYJUDM'
__a = 'HVSICLTYKQUBXDWAJZOMFGPREN'
__a = 'RZWQHFMVDBKICJLNTUXAGYPSOE'
__a = 'LFKIJODBEGAMQPXVUHYSTCZRWN'
__a = 'KOAEGVDHXPQZMLFTYWJNBRCIUS'
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
if (unique_rotsel := len(set(_lowercase ) )) < 3:
UpperCAmelCase_ : List[str] = f'''Please use 3 unique rotors (not {unique_rotsel})'''
raise Exception(_lowercase )
# Checks if rotor positions are valid
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ : str = rotpos
if not 0 < rotorposa <= len(_lowercase ):
UpperCAmelCase_ : Dict = f'''First rotor position is not within range of 1..26 ({rotorposa}'''
raise ValueError(_lowercase )
if not 0 < rotorposa <= len(_lowercase ):
UpperCAmelCase_ : int = f'''Second rotor position is not within range of 1..26 ({rotorposa})'''
raise ValueError(_lowercase )
if not 0 < rotorposa <= len(_lowercase ):
UpperCAmelCase_ : List[Any] = f'''Third rotor position is not within range of 1..26 ({rotorposa})'''
raise ValueError(_lowercase )
# Validates string and returns dict
UpperCAmelCase_ : int = _plugboard(_lowercase )
return rotpos, rotsel, pbdict
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
if not isinstance(_lowercase , _lowercase ):
UpperCAmelCase_ : Union[str, Any] = f'''Plugboard setting isn\'t type string ({type(_lowercase )})'''
raise TypeError(_lowercase )
elif len(_lowercase ) % 2 != 0:
UpperCAmelCase_ : Optional[int] = f'''Odd number of symbols ({len(_lowercase )})'''
raise Exception(_lowercase )
elif pbstring == "":
return {}
pbstring.replace(''' ''' , '''''' )
# Checks if all characters are unique
UpperCAmelCase_ : List[Any] = set()
for i in pbstring:
if i not in abc:
UpperCAmelCase_ : Optional[int] = f'''\'{i}\' not in list of symbols'''
raise Exception(_lowercase )
elif i in tmppbl:
UpperCAmelCase_ : Optional[Any] = f'''Duplicate symbol ({i})'''
raise Exception(_lowercase )
else:
tmppbl.add(_lowercase )
del tmppbl
# Created the dictionary
UpperCAmelCase_ : List[Any] = {}
for j in range(0 , len(_lowercase ) - 1 , 2 ):
UpperCAmelCase_ : Any = pbstring[j + 1]
UpperCAmelCase_ : int = pbstring[j]
return pb
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase = (rotora, rotora, rotora) , _lowercase = "" , ):
'''simple docstring'''
UpperCAmelCase_ : Dict = text.upper()
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ : Tuple = _validator(
_lowercase , _lowercase , plugb.upper() )
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ : int = rotor_position
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ : Union[str, Any] = rotor_selection
rotorposa -= 1
rotorposa -= 1
rotorposa -= 1
UpperCAmelCase_ : Tuple = []
# encryption/decryption process --------------------------
for symbol in text:
if symbol in abc:
# 1st plugboard --------------------------
if symbol in plugboard:
UpperCAmelCase_ : Dict = plugboard[symbol]
# rotor ra --------------------------
UpperCAmelCase_ : Tuple = abc.index(_lowercase ) + rotorposa
UpperCAmelCase_ : Union[str, Any] = rotora[index % len(_lowercase )]
# rotor rb --------------------------
UpperCAmelCase_ : Optional[int] = abc.index(_lowercase ) + rotorposa
UpperCAmelCase_ : Optional[Any] = rotora[index % len(_lowercase )]
# rotor rc --------------------------
UpperCAmelCase_ : int = abc.index(_lowercase ) + rotorposa
UpperCAmelCase_ : int = rotora[index % len(_lowercase )]
# reflector --------------------------
# this is the reason you don't need another machine to decipher
UpperCAmelCase_ : Optional[Any] = reflector[symbol]
# 2nd rotors
UpperCAmelCase_ : int = abc[rotora.index(_lowercase ) - rotorposa]
UpperCAmelCase_ : Union[str, Any] = abc[rotora.index(_lowercase ) - rotorposa]
UpperCAmelCase_ : List[str] = abc[rotora.index(_lowercase ) - rotorposa]
# 2nd plugboard
if symbol in plugboard:
UpperCAmelCase_ : Dict = plugboard[symbol]
# moves/resets rotor positions
rotorposa += 1
if rotorposa >= len(_lowercase ):
UpperCAmelCase_ : Dict = 0
rotorposa += 1
if rotorposa >= len(_lowercase ):
UpperCAmelCase_ : List[Any] = 0
rotorposa += 1
if rotorposa >= len(_lowercase ):
UpperCAmelCase_ : List[str] = 0
# else:
# pass
# Error could be also raised
# raise ValueError(
# 'Invalid symbol('+repr(symbol)+')')
result.append(_lowercase )
return "".join(_lowercase )
if __name__ == "__main__":
__a = 'This is my Python script that emulates the Enigma machine from WWII.'
__a = (1, 1, 1)
__a = 'pictures'
__a = (rotora, rotora, rotora)
__a = enigma(message, rotor_pos, rotor_sel, pb)
print('Encrypted message:', en)
print('Decrypted message:', enigma(en, rotor_pos, rotor_sel, pb))
| 30 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def get_resize_output_image_size(input_image: np.ndarray , output_size: Union[int, Iterable[int]] , keep_aspect_ratio: bool , multiple: int ) -> Tuple[int, int]:
    '''simple docstring'''
    def constraint_to_multiple_of(val , multiple , min_val=0 , max_val=None ):
        x = round(val / multiple ) * multiple

        if max_val is not None and x > max_val:
            x = math.floor(val / multiple ) * multiple

        if x < min_val:
            x = math.ceil(val / multiple ) * multiple

        return x

    output_size = (output_size, output_size) if isinstance(output_size , int ) else output_size

    input_height, input_width = get_image_size(input_image )
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width ) < abs(1 - scale_height ):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height , multiple=multiple )
    new_width = constraint_to_multiple_of(scale_width * input_width , multiple=multiple )

    return (new_height, new_width)
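
# Hedged worked example (hand-computed): a 480x640 input with output_size (384, 384),
# keep_aspect_ratio=True and multiple=32 picks the height scale 0.8 (closest to 1)
# for both sides, yielding (384, 512).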
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : List[str] = ['''pixel_values''']
    def __init__( self , do_resize: bool = True , size: Dict[str, int] = None , resample: PILImageResampling = PILImageResampling.BILINEAR , keep_aspect_ratio: bool = False , ensure_multiple_of: int = 1 , do_rescale: bool = True , rescale_factor: Union[int, float] = 1 / 2_55 , do_normalize: bool = True , image_mean: Optional[Union[float, List[float]]] = None , image_std: Optional[Union[float, List[float]]] = None , **kwargs , ):
        super().__init__(**kwargs )
        size = size if size is not None else {'''height''': 3_84, '''width''': 3_84}
        size = get_size_dict(size )
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize( self , image: np.ndarray , size: Dict[str, int] , keep_aspect_ratio: bool = False , ensure_multiple_of: int = 1 , resample: PILImageResampling = PILImageResampling.BICUBIC , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
        output_size = get_resize_output_image_size(
            image , output_size=(size['''height'''], size['''width''']) , keep_aspect_ratio=keep_aspect_ratio , multiple=ensure_multiple_of , )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def rescale( self , image: np.ndarray , scale: Union[int, float] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        return rescale(image , scale=scale , data_format=data_format , **kwargs )

    def normalize( self , image: np.ndarray , mean: Union[float, List[float]] , std: Union[float, List[float]] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self , images: ImageInput , do_resize: bool = None , size: int = None , keep_aspect_ratio: bool = None , ensure_multiple_of: int = None , resample: PILImageResampling = None , do_rescale: bool = None , rescale_factor: float = None , do_normalize: bool = None , image_mean: Optional[Union[float, List[float]]] = None , image_std: Optional[Union[float, List[float]]] = None , return_tensors: Optional[Union[str, TensorType]] = None , data_format: ChannelDimension = ChannelDimension.FIRST , **kwargs , ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size )
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images )

        if not valid_images(images ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )

        if do_resize and size is None or resample is None:
            raise ValueError('''Size and resample must be specified if do_resize is True.''' )

        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]

        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]

        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]

        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]

        images = [to_channel_dimension_format(image , data_format ) for image in images]

        data = {'''pixel_values''': images}
        return BatchFeature(data=data , tensor_type=return_tensors )
    def post_process_semantic_segmentation( self , outputs , target_sizes: List[Tuple] = None ):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits ) != len(target_sizes ):
                raise ValueError(
                    '''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )

            if is_torch_tensor(target_sizes ):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits ) ):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=False )
                semantic_map = resized_logits[0].argmax(dim=0 )
                semantic_segmentation.append(semantic_map )
        else:
            semantic_segmentation = logits.argmax(dim=1 )
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]

        return semantic_segmentation
| 35 | 0 |
'''simple docstring'''
from __future__ import annotations
import requests
def get_hackernews_story(story_id: str ) -> dict:
    '''simple docstring'''
    url = f"""https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"""
    return requests.get(url ).json()


def hackernews_top_stories(max_stories: int = 10 ) -> list[dict]:
    '''simple docstring'''
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url ).json()[:max_stories]
    return [get_hackernews_story(story_id ) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10 ) -> str:
    '''simple docstring'''
    stories = hackernews_top_stories(max_stories )
    return "\n".join("* [{title}]({url})".format(**story ) for story in stories )
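
# Hedged usage note: hackernews_top_stories_as_markdown(3) renders three
# "* [title](url)" lines and issues max_stories + 1 HTTP requests in total.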
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
| 707 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_import_structure = {
    'configuration_ernie': ['ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ErnieConfig', 'ErnieOnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_ernie'] = [
'ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST',
'ErnieForCausalLM',
'ErnieForMaskedLM',
'ErnieForMultipleChoice',
'ErnieForNextSentencePrediction',
'ErnieForPreTraining',
'ErnieForQuestionAnswering',
'ErnieForSequenceClassification',
'ErnieForTokenClassification',
'ErnieModel',
'ErniePreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 415 | 0 |
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
        with pytest.raises(RequestWouldHangIndefinitelyError ):
            requests.request('GET' , 'https://huggingface.co' )
        with pytest.raises(requests.exceptions.ConnectTimeout ):
            requests.request('GET' , 'https://huggingface.co' , timeout=1.0 )


@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS ):
        with pytest.raises(requests.exceptions.ConnectionError ):
            requests.request('GET' , 'https://huggingface.co' )


def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
        with pytest.raises(ConnectionError ):
            http_head('https://huggingface.co' )
| 47 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''huggingface/informer-tourism-monthly''': (
        '''https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'''
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}
class InformerConfig(PretrainedConfig ):
    model_type = '''informer'''
    attribute_map = {
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''encoder_attention_heads''',
        '''num_hidden_layers''': '''encoder_layers''',
    }
    def __init__( self , prediction_length: Optional[int] = None , context_length: Optional[int] = None , distribution_output: str = "student_t" , loss: str = "nll" , input_size: int = 1 , lags_sequence: List[int] = None , scaling: Optional[Union[str, bool]] = "mean" , num_dynamic_real_features: int = 0 , num_static_real_features: int = 0 , num_static_categorical_features: int = 0 , num_time_features: int = 0 , cardinality: Optional[List[int]] = None , embedding_dimension: Optional[List[int]] = None , d_model: int = 64 , encoder_ffn_dim: int = 32 , decoder_ffn_dim: int = 32 , encoder_attention_heads: int = 2 , decoder_attention_heads: int = 2 , encoder_layers: int = 2 , decoder_layers: int = 2 , is_encoder_decoder: bool = True , activation_function: str = "gelu" , dropout: float = 0.05 , encoder_layerdrop: float = 0.1 , decoder_layerdrop: float = 0.1 , attention_dropout: float = 0.1 , activation_dropout: float = 0.1 , num_parallel_samples: int = 100 , init_std: float = 0.02 , use_cache: bool = True , attention_type: str = "prob" , sampling_factor: int = 5 , distil: bool = True , **kwargs , ):
        '''simple docstring'''
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality ) != num_static_categorical_features:
                raise ValueError(
                    'The cardinality should be a list of the same length as `num_static_categorical_features`' )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension ) != num_static_categorical_features:
                raise ValueError(
                    'The embedding dimension should be a list of the same length as `num_static_categorical_features`' )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(5_0 , (cat + 1) // 2 ) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence ) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
@property
def __lowerCAmelCase ( self : Any ):
'''simple docstring'''
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
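# Usage sketch (not part of the original file, added for illustration): instantiate the
# config with a couple of arbitrary example arguments and inspect the derived
# `feature_size`, which combines the lagged inputs with `_number_of_features`.
if __name__ == "__main__":
    config = InformerConfig(prediction_length=24, num_time_features=2)
    print(config.feature_size)  # input_size * len(lags_sequence) + _number_of_features
    print(config.lags_sequence)  # defaults to [1, 2, 3, 4, 5, 6, 7]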
| 47 | 1 |
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
    import torch

    _torch_available = True
except ImportError:
    _torch_available = False
try:
    from torch.hub import _get_torch_home

    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
    )
default_cache_path = os.path.join(torch_cache_home, "transformers")
CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co"
S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert"
PATH = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
CONFIG = os.path.join(PATH, "config.yaml")
ATTRIBUTES = os.path.join(PATH, "attributes.txt")
OBJECTS = os.path.join(PATH, "objects.txt")
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = "pytorch_model.bin"
CONFIG_NAME = "config.yaml"
def load_labels(objs=OBJECTS, attrs=ATTRIBUTES):
    '''simple docstring'''
    vg_classes = []
    with open(objs) as f:
        for object in f.readlines():
            vg_classes.append(object.split(",")[0].lower().strip())
    vg_attrs = []
    with open(attrs) as f:
        for object in f.readlines():
            vg_attrs.append(object.split(",")[0].lower().strip())
    return vg_classes, vg_attrs
def load_checkpoint(ckp_path):
    '''simple docstring'''
    r = OrderedDict()
    with open(ckp_path, "rb") as f:
        ckp = pkl.load(f)["model"]
    for k in copy.deepcopy(list(ckp.keys())):
        v = ckp.pop(k)
        if isinstance(v, np.ndarray):
            v = torch.tensor(v)
        else:
            assert isinstance(v, torch.Tensor), type(v)
        r[k] = v
    return r
class Config:
    """simple docstring"""

    _pointer = {}

    def __init__(self, dictionary: dict, name: str = "root", level=0):
        """simple docstring"""
        self._name = name
        self._level = level
        d = {}
        for k, v in dictionary.items():
            if v is None:
                raise ValueError()
            k = copy.deepcopy(k)
            v = copy.deepcopy(v)
            if isinstance(v, dict):
                v = Config(v, name=k, level=level + 1)
            d[k] = v
            setattr(self, k, v)
        self._pointer = d

    def __repr__(self):
        """simple docstring"""
        return str(list((self._pointer.keys())))

    def __setattr__(self, key, val):
        """simple docstring"""
        self.__dict__[key] = val
        self.__dict__[key.split(".")[-1]] = val
        levels = key.split(".")
        last_level = len(levels) - 1
        pointer = self._pointer
        if len(levels) > 1:
            for i, l in enumerate(levels):
                if hasattr(self, l) and isinstance(getattr(self, l), Config):
                    setattr(getattr(self, l), ".".join(levels[i:]), val)
                if l == last_level:
                    pointer[l] = val
                else:
                    pointer = pointer[l]

    def to_dict(self):
        """simple docstring"""
        return self._pointer

    def dump_yaml(self, data, file_name):
        """simple docstring"""
        with open(f"{file_name}", "w") as stream:
            dump(data, stream)

    def dump_json(self, data, file_name):
        """simple docstring"""
        with open(f"{file_name}", "w") as stream:
            json.dump(data, stream)

    @staticmethod
    def load_yaml(config):
        """simple docstring"""
        with open(config) as stream:
            data = load(stream, Loader=Loader)
        return data

    def __str__(self):
        """simple docstring"""
        t = "    "
        if self._name != "root":
            r = f"{t * (self._level-1)}{self._name}:\n"
        else:
            r = ""
        level = self._level
        for i, (k, v) in enumerate(self._pointer.items()):
            if isinstance(v, Config):
                r += f"{t * (self._level)}{v}\n"
                self._level += 1
            else:
                r += f"{t * (self._level)}{k}: {v} ({type(v).__name__})\n"
        self._level = level
        return r[:-1]

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: str, **kwargs):
        """simple docstring"""
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        return cls(config_dict)

    @classmethod
    def get_config_dict(cls, pretrained_model_name_or_path: str, **kwargs):
        """simple docstring"""
        cache_dir = kwargs.pop("cache_dir", None)
        force_download = kwargs.pop("force_download", False)
        resume_download = kwargs.pop("resume_download", False)
        proxies = kwargs.pop("proxies", None)
        local_files_only = kwargs.pop("local_files_only", False)
        if os.path.isdir(pretrained_model_name_or_path):
            config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
        elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
            config_file = pretrained_model_name_or_path
        else:
            config_file = hf_bucket_url(pretrained_model_name_or_path, filename=CONFIG_NAME, use_cdn=False)
        try:
            # Load from URL or cache if already cached
            resolved_config_file = cached_path(
                config_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
            )
            # Load config dict
            if resolved_config_file is None:
                raise EnvironmentError
            config_file = Config.load_yaml(resolved_config_file)
        except EnvironmentError:
            msg = "Can't load config for"
            raise EnvironmentError(msg)
        if resolved_config_file == config_file:
            print("loading configuration file from path")
        else:
            print("loading configuration file cache")
        return Config.load_yaml(resolved_config_file), kwargs
def compare(in_tensor):
    '''simple docstring'''
    out_tensor = torch.load("dump.pt", map_location=in_tensor.device)
    n1 = in_tensor.numpy()
    n2 = out_tensor.numpy()[0]
    print(n1.shape, n1[0, 0, :5])
    print(n2.shape, n2[0, 0, :5])
    assert np.allclose(n1, n2, rtol=0.01, atol=0.1), (
        f"{sum([1 for x in np.isclose(n1, n2, rtol=0.01, atol=0.1).flatten() if x is False]) / len(n1.flatten()) * 100:.4f} %"
        " element-wise mismatch"
    )
    raise Exception("tensors are all good")
# Hugging face functions below
def is_remote_url(url_or_filename):
    '''simple docstring'''
    parsed = urlparse(url_or_filename)
    return parsed.scheme in ("http", "https")
def hf_bucket_url(model_id, filename, use_cdn=True):
    '''simple docstring'''
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    if legacy_format:
        return f"{endpoint}/{model_id}-{filename}"
    else:
        return f"{endpoint}/{model_id}/{filename}"
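# Quick illustration (not in the original module, added as a sketch) of the two URL layouts
# produced above: un-namespaced model ids use the legacy "{model_id}-{filename}" form, while
# namespaced ids ("user/model") use "{model_id}/{filename}". The model ids are arbitrary examples.
def _demo_hf_bucket_url():
    assert hf_bucket_url("bert-base-uncased", "config.yaml").endswith("bert-base-uncased-config.yaml")
    assert hf_bucket_url("user/model", "config.yaml", use_cdn=False).endswith("user/model/config.yaml")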
def http_get(url, temp_file, proxies=None, resume_size=0, user_agent=None):
    '''simple docstring'''
    ua = "python/{}".format(sys.version.split()[0])
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__)
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    headers = {"user-agent": ua}
    if resume_size > 0:
        headers["Range"] = "bytes=%d-" % (resume_size,)
    response = requests.get(url, stream=True, proxies=proxies, headers=headers)
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get("Content-Length")
    total = resume_size + int(content_length) if content_length is not None else None
    progress = tqdm(
        unit="B",
        unit_scale=True,
        total=total,
        initial=resume_size,
        desc="Downloading",
    )
    for chunk in response.iter_content(chunk_size=1024):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
def get_from_cache(
    url,
    cache_dir=None,
    force_download=False,
    proxies=None,
    etag_timeout=10,
    resume_download=False,
    user_agent=None,
    local_files_only=False,
):
    '''simple docstring'''
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    os.makedirs(cache_dir, exist_ok=True)
    etag = None
    if not local_files_only:
        try:
            response = requests.head(url, allow_redirects=True, proxies=proxies, timeout=etag_timeout)
            if response.status_code == 200:
                etag = response.headers.get("ETag")
        except (EnvironmentError, requests.exceptions.Timeout):
            # etag is already None
            pass
    filename = url_to_filename(url, etag)
    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)
    # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if etag is None:
        if os.path.exists(cache_path):
            return cache_path
        else:
            matching_files = [
                file
                for file in fnmatch.filter(os.listdir(cache_dir), filename + ".*")
                if not file.endswith(".json") and not file.endswith(".lock")
            ]
            if len(matching_files) > 0:
                return os.path.join(cache_dir, matching_files[-1])
            else:
                # If files cannot be found and local_files_only=True,
                # the models might've been found if local_files_only=False
                # Notify the user about that
                if local_files_only:
                    raise ValueError(
                        "Cannot find the requested files in the cached path and outgoing traffic has been"
                        " disabled. To enable model look-ups and downloads online, set 'local_files_only'"
                        " to False."
                    )
                return None
    # From now on, etag is not None.
    if os.path.exists(cache_path) and not force_download:
        return cache_path
    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + ".lock"
    with FileLock(lock_path):
        # If the download just completed while the lock was activated.
        if os.path.exists(cache_path) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path
        if resume_download:
            incomplete_path = cache_path + ".incomplete"

            @contextmanager
            def _resumable_file_manager():
                with open(incomplete_path, "a+b") as f:
                    yield f

            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path):
                resume_size = os.stat(incomplete_path).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False)
            resume_size = 0
        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            print(
                "%s not found in cache or force_download set to True, downloading to %s",
                url,
                temp_file.name,
            )
            http_get(
                url,
                temp_file,
                proxies=proxies,
                resume_size=resume_size,
                user_agent=user_agent,
            )
        os.replace(temp_file.name, cache_path)
        meta = {"url": url, "etag": etag}
        meta_path = cache_path + ".json"
        with open(meta_path, "w") as meta_file:
            json.dump(meta, meta_file)
    return cache_path
def url_to_filename(url, etag=None):
    '''simple docstring'''
    url_bytes = url.encode("utf-8")
    url_hash = sha256(url_bytes)
    filename = url_hash.hexdigest()
    if etag:
        etag_bytes = etag.encode("utf-8")
        etag_hash = sha256(etag_bytes)
        filename += "." + etag_hash.hexdigest()
    if url.endswith(".h5"):
        filename += ".h5"
    return filename
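# Sketch (not part of the original file, added for illustration): cache filenames are
# deterministic hashes, so the same URL always maps to the same file, and a changed ETag
# produces a fresh cache entry. The URL and ETag below are arbitrary placeholders.
def _demo_url_to_filename():
    plain = url_to_filename("https://example.com/model.bin")
    with_etag = url_to_filename("https://example.com/model.bin", etag='"abc123"')
    assert with_etag.startswith(plain + ".")  # the ETag hash is appended after a dot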
def cached_path(
    url_or_filename,
    cache_dir=None,
    force_download=False,
    proxies=None,
    resume_download=False,
    user_agent=None,
    extract_compressed_file=False,
    force_extract=False,
    local_files_only=False,
):
    '''simple docstring'''
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    if is_remote_url(url_or_filename):
        # URL, so get it from the cache (downloading if necessary)
        output_path = get_from_cache(
            url_or_filename,
            cache_dir=cache_dir,
            force_download=force_download,
            proxies=proxies,
            resume_download=resume_download,
            user_agent=user_agent,
            local_files_only=local_files_only,
        )
    elif os.path.exists(url_or_filename):
        # File, and it exists.
        output_path = url_or_filename
    elif urlparse(url_or_filename).scheme == "":
        # File, but it doesn't exist.
        raise EnvironmentError("file {} not found".format(url_or_filename))
    else:
        # Something unknown
        raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
    if extract_compressed_file:
        if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path):
            return output_path
        # Path where we extract compressed archives
        # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
        output_dir, output_file = os.path.split(output_path)
        output_extract_dir_name = output_file.replace(".", "-") + "-extracted"
        output_path_extracted = os.path.join(output_dir, output_extract_dir_name)
        if os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and not force_extract:
            return output_path_extracted
        # Prevent parallel extractions
        lock_path = output_path + ".lock"
        with FileLock(lock_path):
            shutil.rmtree(output_path_extracted, ignore_errors=True)
            os.makedirs(output_path_extracted)
            if is_zipfile(output_path):
                with ZipFile(output_path, "r") as zip_file:
                    zip_file.extractall(output_path_extracted)
                    zip_file.close()
            elif tarfile.is_tarfile(output_path):
                tar_file = tarfile.open(output_path)
                tar_file.extractall(output_path_extracted)
                tar_file.close()
            else:
                raise EnvironmentError("Archive format of {} could not be identified".format(output_path))
        return output_path_extracted
    return output_path
def get_data(query, delim=","):
    '''simple docstring'''
    assert isinstance(query, str)
    if os.path.isfile(query):
        with open(query) as f:
            data = eval(f.read())
    else:
        req = requests.get(query)
        try:
            data = req.json()
        except Exception:
            data = req.content.decode()
        assert data is not None, "could not connect"
        try:
            data = eval(data)
        except Exception:
            data = data.split("\n")
        req.close()
    return data
def get_image_from_url(url):
    '''simple docstring'''
    response = requests.get(url)
    img = np.array(Image.open(BytesIO(response.content)))
    return img
def load_frcnn_pkl_from_url(url):
    '''simple docstring'''
    fn = url.split("/")[-1]
    if fn not in os.listdir(os.getcwd()):
        wget.download(url)
    with open(fn, "rb") as stream:
        weights = pkl.load(stream)
    model = weights.pop("model")
    new = {}
    for k, v in model.items():
        new[k] = torch.from_numpy(v)
        if "running_var" in k:
            zero = torch.tensor([0])
            k2 = k.replace("running_var", "num_batches_tracked")
            new[k2] = zero
    return new
def get_demo_path():
    '''simple docstring'''
    print(f"{os.path.abspath(os.path.join(PATH, os.pardir))}/demo.ipynb")
def img_tensorize(im, input_format="RGB"):
    '''simple docstring'''
    assert isinstance(im, str)
    if os.path.isfile(im):
        img = cv2.imread(im)
    else:
        img = get_image_from_url(im)
    assert img is not None, f"could not connect to: {im}"
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    if input_format == "RGB":
        img = img[:, :, ::-1]
    return img
def chunk(images, batch=1):
    '''simple docstring'''
    return (images[i : i + batch] for i in range(0, len(images), batch))
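# End-to-end sketch combining the helpers above (the URL is a hypothetical placeholder,
# not taken from the original file): fetch an image, tensorize it, and iterate over a
# small list of images in mini-batches with `chunk`.
def _demo_image_pipeline():
    img = img_tensorize("https://example.com/cat.jpg", input_format="RGB")
    for batch in chunk([img, img, img], batch=2):
        print(len(batch), batch[0].shape)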
| 157 |
import math
def check_partition_perfect(positive_integer: int) -> bool:
    '''simple docstring'''
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)
def solution(max_proportion: float = 1 / 12345) -> int:
    '''simple docstring'''
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
            if perfect_partitions > 0:
                if perfect_partitions / total_partitions < max_proportion:
                    return int(partition_candidate)
        integer += 1
if __name__ == "__main__":
print(f"{solution() = }")
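    # Worked check (added for illustration): the logarithm test above is an integer exactly
    # for numbers of the form 2**k * (2**k - 1). For p = 12 (k = 2):
    # sqrt(4 * 12 + 1) / 2 + 1 / 2 = 7 / 2 + 1 / 2 = 4, and log2(4) = 2.
    assert check_partition_perfect(12)
    assert check_partition_perfect(56)  # k = 3: 8 * 7
    assert not check_partition_perfect(20)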
| 157 | 1 |
def max_product_subarray(numbers):
    if not numbers:
        return 0
    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")
    max_till_now = min_till_now = max_prod = numbers[0]
    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)
        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)
    return max_prod
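# Usage examples (added for illustration): a negative value swaps the running extremes,
# which is why the algorithm tracks both the maximum and the minimum product so far.
if __name__ == "__main__":
    assert max_product_subarray([2, 3, -2, 4]) == 6
    assert max_product_subarray([-2, 0, -1]) == 0
    assert max_product_subarray([-4, -3, -2]) == 12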
| 21 |
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "AI-Sweden/gpt-sw3-126m": 2_048,
    "AI-Sweden/gpt-sw3-350m": 2_048,
    "AI-Sweden/gpt-sw3-1.6b": 2_048,
    "AI-Sweden/gpt-sw3-6.7b": 2_048,
    "AI-Sweden/gpt-sw3-20b": 2_048,
}
class GPTSw3Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=False,
        keep_accents=False,
        pad_token=None,
        unk_token=None,
        eos_token=None,
        bos_token=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        '''simple docstring'''
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        name_or_path = kwargs.get("name_or_path")
        if name_or_path is None:
            logger.warning(
                "name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
                " you are testing the model, this can safely be ignored"
            )
            name_or_path = "None"
        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = "<|endoftext|>" if eos_token is None else eos_token
        unk_token = "<unk>" if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = "<pad>" if pad_token is None else pad_token
            bos_token = "<s>" if bos_token is None else bos_token
        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
        # Used for whitespace normalization in input texts
        # fmt: off
        self.whitespaces = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
        # fmt: on
        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            f"[{''.join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8203]))}]"
        )
    def __getstate__(self):
        '''simple docstring'''
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def vocab_size(self) -> int:
        '''simple docstring'''
        return len(self.sp_model)

    def preprocess_text(self, text: str) -> str:
        '''simple docstring'''
        text = self.non_printing_characters_re.sub("", text)
        # Normalize whitespaces
        text = "".join([char if char not in self.whitespaces else " " for char in text])
        # NFC Unicode normalization
        text = unicodedata.normalize("NFC", text)
        return text

    def _tokenize(self, text: str, **kwargs) -> List[str]:
        '''simple docstring'''
        text = self.preprocess_text(text)
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        '''simple docstring'''
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index: int) -> str:
        '''simple docstring'''
        return self.sp_model.IdToPiece(index)

    @staticmethod
    def clean_up_tokenization(out_string: str) -> str:
        '''simple docstring'''
        return out_string

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        '''simple docstring'''
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string

    def get_vocab(self) -> Dict[str, int]:
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        '''simple docstring'''
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)

    def encode_fast(
        self, text: Union[str, List[str]], return_tensors: Union[str, bool] = False
    ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
        '''simple docstring'''
        if isinstance(text, str):
            text = self.preprocess_text(text)
            token_ids = self.sp_model.encode(text)
        else:
            text = [self.preprocess_text(t) for t in text]
            token_ids = self.sp_model.encode(text)
        if return_tensors is True or return_tensors == "pt":
            token_ids = torch.tensor(token_ids)
        return token_ids

    def decode_fast(self, token_ids: Union[int, List[int]]) -> str:
        '''simple docstring'''
        return self.sp_model.decode(token_ids)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        '''simple docstring'''
        all_responses = [f"User: {text}" if is_user else f"Bot: {text}" for is_user, text in conversation.iter_texts()]
        prompt = (
            f"{self.eos_token}{self.bos_token}" + f"{self.bos_token}".join(all_responses) + f"{self.bos_token}Bot:"
        )
        return self.encode(text=prompt)
| 175 | 0 |
'''simple docstring'''
_lowerCAmelCase : Any = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
_lowerCAmelCase : Any = [{"type": "code", "content": INSTALL_CONTENT}]
_lowerCAmelCase : int = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 694 |
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
def electric_power(voltage: float, current: float, power: float):
    result = namedtuple("result", "name value")
    if (voltage, current, power).count(0) != 1:
        raise ValueError("Only one argument must be 0")
    elif power < 0:
        raise ValueError(
            "Power cannot be negative in any electrical/electronics system"
        )
    elif voltage == 0:
        return result("voltage", power / current)
    elif current == 0:
        return result("current", power / voltage)
    elif power == 0:
        return result("power", float(round(abs(voltage * current), 2)))
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
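    # Usage sketch (added for illustration): exactly one argument is zero and the function
    # returns the missing quantity from P = V * I as a named tuple.
    assert electric_power(voltage=0, current=2, power=50) == ("voltage", 25.0)
    assert electric_power(voltage=230, current=0, power=460) == ("current", 2.0)
    assert electric_power(voltage=5, current=2, power=0) == ("power", 10.0)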
| 694 | 1 |
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import Swin2SRConfig, Swin2SRForImageSuperResolution, Swin2SRImageProcessor
def get_config(checkpoint_url):
    config = Swin2SRConfig()
    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        config.upscale = 4
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        config.upscale = 4
        config.image_size = 48
        config.upsampler = "pixelshuffle_aux"
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        config.depths = [6, 6, 6, 6]
        config.embed_dim = 60
        config.num_heads = [6, 6, 6, 6]
        config.upsampler = "pixelshuffledirect"
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        config.upscale = 4
        config.upsampler = "nearest+conv"
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        config.num_channels = 1
        config.upscale = 1
        config.image_size = 126
        config.window_size = 7
        config.img_range = 255.0
        config.upsampler = ""
    return config
def rename_key(name, config):
    if "patch_embed.proj" in name and "layers" not in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.patch_embeddings.layernorm")
    if "layers" in name:
        name = name.replace("layers", "encoder.stages")
    if "residual_group.blocks" in name:
        name = name.replace("residual_group.blocks", "layers")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "patch_embed.projection")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "conv_first" in name:
        name = name.replace("conv_first", "first_convolution")
    if (
        "upsample" in name
        or "conv_before_upsample" in name
        or "conv_bicubic" in name
        or "conv_up" in name
        or "conv_hr" in name
        or "conv_last" in name
        or "aux" in name
    ):
        # heads
        if "conv_last" in name:
            name = name.replace("conv_last", "final_convolution")
        if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
            if "conv_before_upsample.0" in name:
                name = name.replace("conv_before_upsample.0", "conv_before_upsample")
            if "upsample.0" in name:
                name = name.replace("upsample.0", "upsample.convolution_0")
            if "upsample.2" in name:
                name = name.replace("upsample.2", "upsample.convolution_1")
            name = "upsample." + name
        elif config.upsampler == "pixelshuffledirect":
            name = name.replace("upsample.0.weight", "upsample.conv.weight")
            name = name.replace("upsample.0.bias", "upsample.conv.bias")
        else:
            pass
    else:
        name = "swin2sr." + name
    return name
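# Illustration (not part of the original script, added as a sketch) of a typical rename
# performed above; the config only matters for the upsampler-specific head branches, so
# any non-head key works here.
def _demo_rename_key(config):
    old = "layers.0.residual_group.blocks.1.attn.proj.weight"
    # -> "swin2sr.encoder.stages.0.layers.1.attention.output.dense.weight"
    return rename_key(old, config)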
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split(".")
            stage_num = int(key_split[1])
            block_num = int(key_split[4])
            dim = config.embed_dim
            if "weight" in key:
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[key] = val
    return orig_state_dict
def convert_swin2sr_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    config = get_config(checkpoint_url)
    model = Swin2SRForImageSuperResolution(config)
    model.eval()
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    if len(missing_keys) > 0:
        raise ValueError("Missing keys when converting: {}".format(missing_keys))
    for key in unexpected_keys:
        if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
            raise ValueError(f"Unexpected key {key} in state_dict")
    # verify values
    url = "https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    processor = Swin2SRImageProcessor()
    # pixel_values = processor(image, return_tensors="pt").pixel_values
    image_size = 126 if "Jpeg" in checkpoint_url else 256
    transforms = Compose(
        [
            Resize((image_size, image_size)),
            ToTensor(),
            Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    pixel_values = transforms(image).unsqueeze(0)
    if config.num_channels == 1:
        pixel_values = pixel_values[:, 0, :, :].unsqueeze(1)
    outputs = model(pixel_values)
    # assert values
    if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]]
        )
    elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]]
        )
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        # TODO values didn't match exactly here
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]]
        )
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]]
        )
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]]
        )
    assert (
        outputs.reconstruction.shape == expected_shape
    ), f"Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"
    assert torch.allclose(outputs.reconstruction[0, 0, :3, :3], expected_slice, atol=1e-3)
    print("Looks ok!")
    url_to_name = {
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth": (
            "swin2SR-classical-sr-x2-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth": (
            "swin2SR-classical-sr-x4-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth": (
            "swin2SR-compressed-sr-x4-48"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth": (
            "swin2SR-lightweight-x2-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth": (
            "swin2SR-realworld-sr-x4-64-bsrgan-psnr"
        ),
    }
    model_name = url_to_name[checkpoint_url]
    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        model.push_to_hub(f"caidas/{model_name}")
        processor.push_to_hub(f"caidas/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth',
type=str,
help='URL of the original Swin2SR checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the converted model to the hub.')
    args = parser.parse_args()
    convert_swin2sr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 401 |
import argparse
import torch
from transformers import (
    SpeechT5Config,
    SpeechT5FeatureExtractor,
    SpeechT5ForSpeechToSpeech,
    SpeechT5ForSpeechToText,
    SpeechT5ForTextToSpeech,
    SpeechT5Processor,
    SpeechT5Tokenizer,
    logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
logger = logging.get_logger('transformers.models.speecht5')
MAPPING_SPEECH_ENCODER_PRENET = {
'speech_encoder_prenet.layer_norm': 'speecht5.encoder.prenet.feature_projection.layer_norm',
'speech_encoder_prenet.post_extract_proj': 'speecht5.encoder.prenet.feature_projection.projection',
'speech_encoder_prenet.pos_conv.0': 'speecht5.encoder.prenet.pos_conv_embed.conv',
'speech_encoder_prenet.mask_emb': 'speecht5.encoder.prenet.masked_spec_embed',
}
MAPPING_TEXT_ENCODER_PRENET = {
'text_encoder_prenet.encoder_prenet.0': 'speecht5.encoder.prenet.embed_tokens',
'text_encoder_prenet.encoder_prenet.1.alpha': 'speecht5.encoder.prenet.encode_positions.alpha',
}
MAPPING_SPEECH_DECODER_PRENET = {
'speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0': 'speecht5.decoder.prenet.layers.0',
'speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0': 'speecht5.decoder.prenet.layers.1',
'speech_decoder_prenet.decoder_prenet.0.1': 'speecht5.decoder.prenet.final_layer',
'speech_decoder_prenet.decoder_prenet.1.alpha': 'speecht5.decoder.prenet.encode_positions.alpha',
'speech_decoder_prenet.spkembs_layer.0': 'speecht5.decoder.prenet.speaker_embeds_layer',
}
MAPPING_SPEECH_DECODER_POSTNET = {
'speech_decoder_postnet.feat_out': 'speech_decoder_postnet.feat_out',
'speech_decoder_postnet.prob_out': 'speech_decoder_postnet.prob_out',
'speech_decoder_postnet.postnet.postnet.0.0': 'speech_decoder_postnet.layers.0.conv',
'speech_decoder_postnet.postnet.postnet.0.1': 'speech_decoder_postnet.layers.0.batch_norm',
'speech_decoder_postnet.postnet.postnet.1.0': 'speech_decoder_postnet.layers.1.conv',
'speech_decoder_postnet.postnet.postnet.1.1': 'speech_decoder_postnet.layers.1.batch_norm',
'speech_decoder_postnet.postnet.postnet.2.0': 'speech_decoder_postnet.layers.2.conv',
'speech_decoder_postnet.postnet.postnet.2.1': 'speech_decoder_postnet.layers.2.batch_norm',
'speech_decoder_postnet.postnet.postnet.3.0': 'speech_decoder_postnet.layers.3.conv',
'speech_decoder_postnet.postnet.postnet.3.1': 'speech_decoder_postnet.layers.3.batch_norm',
'speech_decoder_postnet.postnet.postnet.4.0': 'speech_decoder_postnet.layers.4.conv',
'speech_decoder_postnet.postnet.postnet.4.1': 'speech_decoder_postnet.layers.4.batch_norm',
}
MAPPING_TEXT_DECODER_PRENET = {
'text_decoder_prenet.embed_tokens': 'speecht5.decoder.prenet.embed_tokens',
}
MAPPING_TEXT_DECODER_POSTNET = {
'text_decoder_postnet.output_projection': 'text_decoder_postnet.lm_head',
}
MAPPING_ENCODER = {
'encoder.layers.*.self_attn.k_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj',
'encoder.layers.*.self_attn.v_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj',
'encoder.layers.*.self_attn.q_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj',
'encoder.layers.*.self_attn.out_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj',
'encoder.layers.*.self_attn_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.layer_norm',
'encoder.layers.*.fc1': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense',
'encoder.layers.*.fc2': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense',
'encoder.layers.*.final_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'speecht5.encoder.wrapped_encoder.layer_norm',
'encoder.pos_emb.pe_k': 'speecht5.encoder.wrapped_encoder.embed_positions.pe_k',
}
MAPPING_DECODER = {
'decoder.layers.*.self_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj',
'decoder.layers.*.self_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj',
'decoder.layers.*.self_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj',
'decoder.layers.*.self_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj',
'decoder.layers.*.self_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm',
'decoder.layers.*.encoder_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj',
'decoder.layers.*.encoder_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj',
'decoder.layers.*.encoder_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj',
'decoder.layers.*.encoder_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj',
'decoder.layers.*.encoder_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm',
'decoder.layers.*.fc1': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense',
'decoder.layers.*.fc2': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense',
'decoder.layers.*.final_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm',
}
MAPPING_S2T = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
'encoder.version',
'encoder.layers.*.norm_k.weight',
'encoder.layers.*.norm_k.bias',
'decoder.version',
'decoder.layers.*.norm_k.weight',
'decoder.layers.*.norm_k.bias',
'decoder.pos_emb.pe_k',
'speech_encoder_prenet.embed_positions._float_tensor',
'text_decoder_prenet.embed_positions._float_tensor',
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
'encoder.proj',
'text_encoder_prenet.*',
'speech_decoder_prenet.*',
'speech_decoder_postnet.*',
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
'encoder.proj',
'speech_encoder_prenet.*',
'text_decoder_prenet.*',
'text_decoder_postnet.*',
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
'encoder.proj',
'text_encoder_prenet.*',
'text_decoder_prenet.*',
'text_decoder_postnet.*',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
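# Small sketch (not part of the conversion script, added for illustration) of how the
# wildcard patterns above are matched: a trailing ".*" matches any suffix, and an embedded
# ".*." matches any middle segment.
def _demo_should_ignore():
    assert should_ignore("text_encoder_prenet.encoder_prenet.0.weight", IGNORE_KEYS_S2T)
    assert should_ignore("encoder.layers.3.norm_k.weight", IGNORE_KEYS)
    assert not should_ignore("encoder.layers.3.fc1.weight", IGNORE_KEYS)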
def recursively_load_weights(fairseq_dict, hf_model, task):
    unused_weights = []
    if task == "s2t":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(f"Unsupported task: {task}")
    for name, value in fairseq_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_encoder,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix, suffix = key.split(".*.")
                    if prefix in name and suffix in name:
                        key = suffix
                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                    continue
            if not is_used:
                unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_speecht5_checkpoint(
    task,
    checkpoint_path,
    pytorch_dump_folder_path,
    config_path=None,
    vocab_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = SpeechT5Config.from_pretrained(config_path)
    else:
        config = SpeechT5Config()
    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechT5ForSpeechToText(config)
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechT5ForTextToSpeech(config)
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechT5ForSpeechToSpeech(config)
    else:
        raise ValueError(f"Unknown task name: {task}")
    if vocab_path:
        tokenizer = SpeechT5Tokenizer(vocab_path, model_max_length=config.max_text_positions)
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])
    feature_extractor = SpeechT5FeatureExtractor()
    processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor)
    processor.save_pretrained(pytorch_dump_folder_path)
    fairseq_checkpoint = torch.load(checkpoint_path)
    recursively_load_weights(fairseq_checkpoint["model"], model, task)
    model.save_pretrained(pytorch_dump_folder_path)
    if repo_id:
        print("Pushing to the hub...")
        processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--task',
default='s2t',
type=str,
help='Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.',
)
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--vocab_path', default=None, type=str, help='Path to SentencePiece model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
    args = parser.parse_args()
    convert_speecht5_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
| 401 | 1 |
"""simple docstring"""
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def fill_mask(masked_input, model, tokenizer, topk=5):
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))]
    )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
    return topk_filled_outputs
tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()

masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
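# Hedged usage sketch: each returned entry is (filled_sentence, probability,
# predicted_token); the exact tokens and scores depend on the camembert-base weights.
best_sentence, best_score, best_token = fill_mask(masked_input, model, tokenizer, topk=3)[0]
print(f"best completion: {best_sentence!r} (p={best_score:.3f}, token={best_token!r})")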
| 708 |
"""simple docstring"""
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_states = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_states)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}
if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save("generated.png")
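# Minimal timing sketch (assumes the objects defined above; the `time` import is
# new here and the absolute numbers depend entirely on the host CPU):
import time

start = time.perf_counter()
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    _ = pipe(prompt, **generate_kwargs).images[0]
print(f"one pipeline call took {time.perf_counter() - start:.1f}s")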
| 491 | 0 |
'''simple docstring'''
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    """Return how many times `term` occurs in `document` (case-insensitive)."""
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    """Return (number of documents containing `term`, total number of documents)."""
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing: bool = False) -> float:
    """Return idf = log10(n / df), optionally with add-one smoothing."""
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: float) -> float:
    """Combine a term frequency with an inverse document frequency score."""
    return round(tf * idf, 3)
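if __name__ == "__main__":
    # Worked example tying the four functions together (hypothetical two-line
    # corpus; df=2 of n=2 documents contain "document", so idf = log10(1) = 0.0):
    corpus = "this is the first document\nthis document is the second document"
    tf = term_frequency("document", "this document is the second document")  # -> 2
    df, n = document_frequency("document", corpus)                           # -> (2, 2)
    print(tf_idf(tf, inverse_document_frequency(df, n)))                     # -> 0.0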
| 24 |
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha):
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)

    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)

    visited = []

    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"

        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue

        if "text" in key:
            layer_infos = key.split(".")[0].split(lora_prefix_text_encoder + "_")[-1].split("_")
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(".")[0].split(lora_prefix_unet + "_")[-1].split("_")
            curr_layer = pipeline.unet

        # find the target layer
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)

        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("lora_down", "lora_up"))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace("lora_up", "lora_down"))

        # update weight
        if len(state_dict[pair_keys[0]].shape) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)

        # update visited list
        for item in pair_keys:
            visited.append(item)

    return pipeline
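# Quick numeric illustration of the merge rule used above, W += alpha * (up @ down)
# (illustrative doctest-style sketch; the 2x1 / 1x2 shapes are arbitrary):
#   >>> up, down = torch.ones(2, 1), torch.ones(1, 2)
#   >>> w = torch.zeros(2, 2)
#   >>> w += 0.75 * torch.mm(up, down)
#   >>> torch.equal(w, torch.full((2, 2), 0.75))
#   True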
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--base_model_path", default=None, type=str, required=True, help="Path to the base model in diffusers format."
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--lora_prefix_unet", default="lora_unet", type=str, help="The prefix of UNet weight in safetensors"
    )
    parser.add_argument(
        "--lora_prefix_text_encoder",
        default="lora_te",
        type=str,
        help="The prefix of text encoder weight in safetensors",
    )
    parser.add_argument("--alpha", default=0.75, type=float, help="The merging ratio in W = W0 + alpha * deltaW")
    parser.add_argument(
        "--to_safetensors", action="store_true", help="Whether to store pipeline in safetensors format or not."
    )
    parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")

    args = parser.parse_args()

    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha

    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    pipe = pipe.to(args.device)
    pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors) | 322 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 715 |
"""simple docstring"""
from __future__ import annotations
import os
from typing import Any
import requests
BASE_URL = "https://api.github.com"

# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + "/user"

# https://github.com/settings/tokens
USER_TOKEN = os.environ.get("USER_TOKEN", "")


def fetch_github_info(auth_token: str) -> dict[Any, Any]:
    headers = {
        "Authorization": f"token {auth_token}",
        "Accept": "application/vnd.github.v3+json",
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers).json()


if __name__ == "__main__":  # pragma: no cover
    if USER_TOKEN:
        for key, value in fetch_github_info(USER_TOKEN).items():
            print(f"{key}: {value}")
    else:
        raise ValueError("'USER_TOKEN' field cannot be empty.")
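# Hedged usage sketch: "login" and "id" are among the keys GitHub's v3 /user
# endpoint returns for the authenticated account (the full key set may vary):
#   >>> fetch_github_info(USER_TOKEN)["login"]
#   'octocat'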
| 141 | 0 |
'''simple docstring'''
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875) < 1e-2
            assert abs(result_mean.item() - 0.2178705964565277) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406) < 1e-2
            assert abs(result_mean.item() - 0.22342906892299652) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453) < 1e-2
            assert abs(result_mean.item() - 0.16226289014816284) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703) < 1e-2
            assert abs(result_mean.item() - 0.16688326001167297) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8487548828125) < 1e-2
            assert abs(result_mean.item() - 0.1560530662536621) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938) < 1e-2
            assert abs(result_mean.item() - 0.21805934607982635) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312) < 1e-2
            assert abs(result_mean.item() - 0.22342908382415771) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3135223388672) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
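# Standalone sketch of the same sampling loop outside the test harness (commented
# out so it does not run on import; requires torchsde, and the zero "model output"
# is only a stand-in for a real denoiser network):
#   scheduler = DPMSolverSDEScheduler(num_train_timesteps=1100, noise_sampler_seed=0)
#   scheduler.set_timesteps(10)
#   sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       model_output = torch.zeros_like(scheduler.scale_model_input(sample, t))
#       sample = scheduler.step(model_output, t, sample).prev_sample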
| 683 |
'''simple docstring'''
from __future__ import annotations

from collections.abc import Iterable
from typing import Any


class Node:
    def __init__(self, value: Any = None) -> None:
        self.value = value
        self.parent: Node | None = None  # Added in order to delete a node easier
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return str(self.value)
        return pformat({f"{self.value}": (self.left, self.right)}, indent=1)


class BinarySearchTree:
    def __init__(self, root: Node | None = None) -> None:
        self.root = root

    def __str__(self) -> str:
        return str(self.root)

    def __reassign_nodes(self, node: Node, new_children: Node | None) -> None:
        if new_children is not None:  # reset its kids
            new_children.parent = node.parent
        if node.parent is not None:  # reset its parent
            if self.is_right(node):  # If it is the right child
                node.parent.right = new_children
            else:
                node.parent.left = new_children
        else:
            self.root = new_children

    def is_right(self, node: Node) -> bool:
        if node.parent and node.parent.right:
            return node == node.parent.right
        return False

    def empty(self) -> bool:
        return self.root is None

    def __insert(self, value) -> None:
        new_node = Node(value)  # create a new Node
        if self.empty():  # if Tree is empty
            self.root = new_node  # set its root
        else:  # Tree is not empty
            parent_node = self.root  # from root
            if parent_node is None:
                return
            while True:  # While we don't get to a leaf
                if value < parent_node.value:  # We go left
                    if parent_node.left is None:
                        parent_node.left = new_node  # We insert the new node in a leaf
                        break
                    else:
                        parent_node = parent_node.left
                else:
                    if parent_node.right is None:
                        parent_node.right = new_node
                        break
                    else:
                        parent_node = parent_node.right
            new_node.parent = parent_node

    def insert(self, *values) -> None:
        for value in values:
            self.__insert(value)

    def search(self, value) -> Node | None:
        if self.empty():
            raise IndexError("Warning: Tree is empty! please use another.")
        else:
            node = self.root
            # use lazy evaluation here to avoid NoneType Attribute error
            while node is not None and node.value is not value:
                node = node.left if value < node.value else node.right
            return node

    def get_max(self, node: Node | None = None) -> Node | None:
        if node is None:
            if self.root is None:
                return None
            node = self.root
        if not self.empty():
            while node.right is not None:
                node = node.right
        return node

    def get_min(self, node: Node | None = None) -> Node | None:
        if node is None:
            node = self.root
            if self.root is None:
                return None
        if not self.empty():
            node = self.root
            while node.left is not None:
                node = node.left
        return node

    def remove(self, value) -> None:
        node = self.search(value)  # Look for the node with that label
        if node is not None:
            if node.left is None and node.right is None:  # If it has no children
                self.__reassign_nodes(node, None)
            elif node.left is None:  # Has only right children
                self.__reassign_nodes(node, node.right)
            elif node.right is None:  # Has only left children
                self.__reassign_nodes(node, node.left)
            else:
                tmp_node = self.get_max(
                    node.left
                )  # Gets the max value of the left branch
                self.remove(tmp_node.value)  # type: ignore
                node.value = (
                    tmp_node.value  # type: ignore
                )  # Assigns the value to the node to delete and keep tree structure

    def preorder_traverse(self, node: Node | None) -> Iterable:
        if node is not None:
            yield node  # Preorder Traversal
            yield from self.preorder_traverse(node.left)
            yield from self.preorder_traverse(node.right)

    def traversal_tree(self, traversal_function=None) -> Any:
        if traversal_function is None:
            return self.preorder_traverse(self.root)
        else:
            return traversal_function(self.root)

    def inorder(self, arr: list, node: Node | None) -> None:
        if node:
            self.inorder(arr, node.left)
            arr.append(node.value)
            self.inorder(arr, node.right)

    def find_kth_smallest(self, k: int, node: Node) -> int:
        kth_smallest: list[int] = []
        self.inorder(kth_smallest, node)  # append all values to list using inorder traversal
        return kth_smallest[k - 1]


def postorder(curr_node: Node | None) -> list[Node]:
    node_list = []
    if curr_node is not None:
        node_list = postorder(curr_node.left) + postorder(curr_node.right) + [curr_node]
    return node_list


def binary_search_tree() -> None:
    testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    t = BinarySearchTree()
    for i in testlist:
        t.insert(i)

    # Prints all the elements of the list in order traversal
    print(t)

    if t.search(6) is not None:
        print("The value 6 exists")
    else:
        print("The value 6 doesn't exist")

    if t.search(-1) is not None:
        print("The value -1 exists")
    else:
        print("The value -1 doesn't exist")

    if not t.empty():
        print("Max Value: ", t.get_max().value)  # type: ignore
        print("Min Value: ", t.get_min().value)  # type: ignore

    for i in testlist:
        t.remove(i)
    print(t)
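# Extra usage sketch (doctest-style comments, not executed) for the helpers the
# demo above does not call; values follow from inserting 8, 3, 6, 1, 10:
#   >>> t = BinarySearchTree()
#   >>> t.insert(8, 3, 6, 1, 10)
#   >>> t.find_kth_smallest(3, t.root)
#   6
#   >>> [node.value for node in postorder(t.root)]
#   [1, 6, 3, 10, 8]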
if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
| 683 | 1 |
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
    from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor
class ConvNextModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class ConvNextModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextModel,
            ConvNextForImageClassification,
            ConvNextBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ConvNext does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNext does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNext does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ConvNextModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0260, -0.4739, 0.1911]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))


@require_torch
class ConvNextBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (ConvNextBackbone,) if is_torch_available() else ()
    config_class = ConvNextConfig
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self) | 256 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/wavlm-base": "https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json",
    # See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class WavLMConfig(PretrainedConfig):
    model_type = "wavlm"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        num_buckets=320,
        max_bucket_distance=800,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_buckets = num_buckets
        self.max_bucket_distance = max_bucket_distance
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
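# Illustrative note (not part of the original module): with the default conv_stride
# of (5, 2, 2, 2, 2, 2, 2) the feature encoder downsamples raw audio by
# 5 * 2**6 = 320 samples per frame, which is exactly what the property computes:
#   >>> WavLMConfig().inputs_to_logits_ratio
#   320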
| 256 | 1 |
'''simple docstring'''
import re
import string
import numpy as np
import datasets
_DESCRIPTION = """
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions: List of predicted texts.
    references: List of reference texts.
    regexes_to_ignore: List, defaults to None. Regex expressions of characters to
        ignore when calculating the exact matches. Note: these regexes are removed
        from the input data before the changes based on the options below (e.g. ignore_case,
        ignore_punctuation, ignore_numbers) are applied.
    ignore_case: Boolean, defaults to False. If true, turns everything
        to lowercase so that capitalization differences are ignored.
    ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
        comparing predictions and references.
    ignore_numbers: Boolean, defaults to False. If true, removes all digits before
        comparing predictions and references.
Returns:
    exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds)
    >>> print(round(results["exact_match"], 1))
    25.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)
    >>> print(round(results["exact_match"], 1))
    50.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)
    >>> print(round(results["exact_match"], 1))
    75.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
    >>> print(round(results["exact_match"], 1))
    100.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It's like comparing oranges and apples."]
    >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It's like comparing apples and oranges."]
    >>> results = exact_match.compute(references=refs, predictions=preds)
    >>> print(round(results["exact_match"], 1))
    33.3
"""

_CITATION = "\n"


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ExactMatch(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references
        return {"exact_match": np.mean(score_list) * 100}
| 50 |
'''simple docstring'''
def print_max_activities(start: list[int], finish: list[int]) -> None:
    """
    Greedy activity selection: prints the maximum set of mutually compatible
    activities, assuming `finish` is sorted in non-decreasing order.

    >>> start = [1, 3, 0, 5, 8, 5]
    >>> finish = [2, 4, 6, 7, 9, 9]
    >>> print_max_activities(start, finish)
    The following activities are selected:
    0,1,3,4,
    """
    n = len(finish)
    print("The following activities are selected:")

    # The first activity is always selected
    i = 0
    print(i, end=",")

    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
    print_max_activities(start, finish)
| 50 | 1 |
"""simple docstring"""
encode_dict = {
    "a": "AAAAA",
    "b": "AAAAB",
    "c": "AAABA",
    "d": "AAABB",
    "e": "AABAA",
    "f": "AABAB",
    "g": "AABBA",
    "h": "AABBB",
    "i": "ABAAA",
    "j": "BBBAA",
    "k": "ABAAB",
    "l": "ABABA",
    "m": "ABABB",
    "n": "ABBAA",
    "o": "ABBAB",
    "p": "ABBBA",
    "q": "ABBBB",
    "r": "BAAAA",
    "s": "BAAAB",
    "t": "BAABA",
    "u": "BAABB",
    "v": "BBBAB",
    "w": "BABAA",
    "x": "BABAB",
    "y": "BABBA",
    "z": "BABBB",
    " ": " ",
}

decode_dict = {value: key for key, value in encode_dict.items()}
def encode(word: str) -> str:
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces")
    return encoded


def decode(coded: str) -> str:
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ""
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
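# Round-trip illustration (doctest-style comments): encode() emits five letters
# per input character and preserves spaces, so decode() inverts it word by word:
#   >>> decode(encode("hello world"))
#   'hello world'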
if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 717 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
    import torch

logger = logging.get_logger(__name__)
@dataclass
class BitsAndBytesConfig:
    def __init__(
        self,
        load_in_8bit=False,
        load_in_4bit=False,
        llm_int8_threshold=6.0,
        llm_int8_skip_modules=None,
        llm_int8_enable_fp32_cpu_offload=False,
        llm_int8_has_fp16_weight=False,
        bnb_4bit_compute_dtype=None,
        bnb_4bit_quant_type="fp4",
        bnb_4bit_use_double_quant=False,
        **kwargs,
    ):
        self.load_in_8bit = load_in_8bit
        self.load_in_4bit = load_in_4bit
        self.llm_int8_threshold = llm_int8_threshold
        self.llm_int8_skip_modules = llm_int8_skip_modules
        self.llm_int8_enable_fp32_cpu_offload = llm_int8_enable_fp32_cpu_offload
        self.llm_int8_has_fp16_weight = llm_int8_has_fp16_weight
        self.bnb_4bit_quant_type = bnb_4bit_quant_type
        self.bnb_4bit_use_double_quant = bnb_4bit_use_double_quant

        if bnb_4bit_compute_dtype is None:
            self.bnb_4bit_compute_dtype = torch.float32
        elif isinstance(bnb_4bit_compute_dtype, str):
            self.bnb_4bit_compute_dtype = getattr(torch, bnb_4bit_compute_dtype)
        elif isinstance(bnb_4bit_compute_dtype, torch.dtype):
            self.bnb_4bit_compute_dtype = bnb_4bit_compute_dtype
        else:
            raise ValueError("bnb_4bit_compute_dtype must be a string or a torch.dtype")

        self.post_init()

    def post_init(self):
        if not isinstance(self.llm_int8_threshold, float):
            raise ValueError("llm_int8_threshold must be a float")

        if self.llm_int8_skip_modules is not None and not isinstance(self.llm_int8_skip_modules, list):
            raise ValueError("llm_int8_skip_modules must be a list of strings")

        if not isinstance(self.llm_int8_enable_fp32_cpu_offload, bool):
            raise ValueError("llm_int8_enable_fp32_cpu_offload must be a boolean")

        if not isinstance(self.llm_int8_has_fp16_weight, bool):
            raise ValueError("llm_int8_has_fp16_weight must be a boolean")

        if self.bnb_4bit_compute_dtype is not None and not isinstance(self.bnb_4bit_compute_dtype, torch.dtype):
            raise ValueError("bnb_4bit_compute_dtype must be torch.dtype")

        if not isinstance(self.bnb_4bit_quant_type, str):
            raise ValueError("bnb_4bit_quant_type must be a string")

        if not isinstance(self.bnb_4bit_use_double_quant, bool):
            raise ValueError("bnb_4bit_use_double_quant must be a boolean")

        if self.load_in_4bit and not version.parse(importlib.metadata.version("bitsandbytes")) >= version.parse(
            "0.39.0"
        ):
            raise ValueError(
                "4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version"
            )

    def is_quantizable(self):
        return self.load_in_8bit or self.load_in_4bit

    def quantization_method(self):
        if self.load_in_8bit:
            return "llm_int8"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "fp4":
            return "fp4"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "nf4":
            return "nf4"
        else:
            return None

    @classmethod
    def from_dict(cls, config_dict, return_unused_kwargs, **kwargs):
        config = cls(**config_dict)

        to_remove = []
        for key, value in kwargs.items():
            if hasattr(config, key):
                setattr(config, key, value)
                to_remove.append(key)
        for key in to_remove:
            kwargs.pop(key, None)

        if return_unused_kwargs:
            return config, kwargs
        else:
            return config

    def to_json_file(self, json_file_path):
        with open(json_file_path, "w", encoding="utf-8") as writer:
            config_dict = self.to_dict()
            json_string = json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
            writer.write(json_string)

    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["bnb_4bit_compute_dtype"] = str(output["bnb_4bit_compute_dtype"]).split(".")[1]
        return output

    def __repr__(self) -> str:
        return f"{self.__class__.__name__} {self.to_json_string()}"

    def to_json_string(self, use_diff: bool = True) -> str:
        if use_diff is True:
            config_dict = self.to_diff_dict()
        else:
            config_dict = self.to_dict()
        return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"

    def to_diff_dict(self) -> Dict[str, Any]:
        config_dict = self.to_dict()

        # get the default config dict
        default_config_dict = BitsAndBytesConfig().to_dict()

        serializable_config_dict = {}

        # only serialize values that differ from the default config
        for key, value in config_dict.items():
            if value != default_config_dict[key]:
                serializable_config_dict[key] = value

        return serializable_config_dict
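# Hedged usage sketch (doctest-style comments; constructing the config requires
# bitsandbytes>=0.39.0 installed so that post_init passes): a 4-bit NF4 config
# with double quantization, as typically passed to from_pretrained():
#   >>> nf4_config = BitsAndBytesConfig(
#   ...     load_in_4bit=True,
#   ...     bnb_4bit_quant_type="nf4",
#   ...     bnb_4bit_use_double_quant=True,
#   ...     bnb_4bit_compute_dtype="bfloat16",
#   ... )
#   >>> nf4_config.quantization_method()
#   'nf4'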
| 120 | 0 |
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _lowerCamelCase ( lowerCamelCase_: Any , lowerCamelCase_: str ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def _lowerCamelCase ( lowerCamelCase_: str , lowerCamelCase_: str , lowerCamelCase_: List[Any] ):
'''simple docstring'''
A : Dict = tmp_path / '''cache'''
A : List[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
A : Dict = ParquetDatasetReader(lowerCamelCase_ , cache_dir=lowerCamelCase_ , keep_in_memory=lowerCamelCase_ ).read()
_check_parquet_dataset(lowerCamelCase_ , lowerCamelCase_ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def _lowerCamelCase ( lowerCamelCase_: str , lowerCamelCase_: Tuple , lowerCamelCase_: Optional[int] ):
'''simple docstring'''
A : Tuple = tmp_path / '''cache'''
A : int = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
A : str = features.copy() if features else default_expected_features
A : Any = (
Features({feature: Value(lowerCamelCase_ ) for feature, dtype in features.items()} ) if features is not None else None
)
A : Union[str, Any] = ParquetDatasetReader(lowerCamelCase_ , features=lowerCamelCase_ , cache_dir=lowerCamelCase_ ).read()
_check_parquet_dataset(lowerCamelCase_ , lowerCamelCase_ )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def _lowerCamelCase ( lowerCamelCase_: List[Any] , lowerCamelCase_: Optional[Any] , lowerCamelCase_: Tuple ):
'''simple docstring'''
A : Dict = tmp_path / '''cache'''
A : str = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
A : Any = ParquetDatasetReader(lowerCamelCase_ , cache_dir=lowerCamelCase_ , split=lowerCamelCase_ ).read()
_check_parquet_dataset(lowerCamelCase_ , lowerCamelCase_ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def _lowerCamelCase ( lowerCamelCase_: int , lowerCamelCase_: int , lowerCamelCase_: Optional[int] ):
'''simple docstring'''
if issubclass(lowerCamelCase_ , lowerCamelCase_ ):
A : Union[str, Any] = parquet_path
elif issubclass(lowerCamelCase_ , lowerCamelCase_ ):
A : Optional[int] = [parquet_path]
A : Any = tmp_path / '''cache'''
A : Optional[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
A : Union[str, Any] = ParquetDatasetReader(lowerCamelCase_ , cache_dir=lowerCamelCase_ ).read()
_check_parquet_dataset(lowerCamelCase_ , lowerCamelCase_ )
def _lowerCamelCase ( lowerCamelCase_: Dict , lowerCamelCase_: Tuple , lowerCamelCase_: Any=("train",) ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ )
for split in splits:
A : int = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def _lowerCamelCase ( lowerCamelCase_: List[str] , lowerCamelCase_: Dict , lowerCamelCase_: Union[str, Any] ):
'''simple docstring'''
A : int = tmp_path / '''cache'''
A : Any = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
A : Any = ParquetDatasetReader(
{'''train''': parquet_path} , cache_dir=lowerCamelCase_ , keep_in_memory=lowerCamelCase_ ).read()
_check_parquet_datasetdict(lowerCamelCase_ , lowerCamelCase_ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def _lowerCamelCase ( lowerCamelCase_: List[Any] , lowerCamelCase_: str , lowerCamelCase_: List[str] ):
'''simple docstring'''
A : int = tmp_path / '''cache'''
A : Dict = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
A : Optional[Any] = features.copy() if features else default_expected_features
A : Union[str, Any] = (
Features({feature: Value(lowerCamelCase_ ) for feature, dtype in features.items()} ) if features is not None else None
)
A : int = ParquetDatasetReader({'''train''': parquet_path} , features=lowerCamelCase_ , cache_dir=lowerCamelCase_ ).read()
_check_parquet_datasetdict(lowerCamelCase_ , lowerCamelCase_ )
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    if split:
        path = {split: parquet_path}
    else:
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def test_parquet_write(dataset, tmp_path):
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table
def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0

    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features

    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features
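# --- Illustrative sketch (not part of the original test file) ---
# The reader/writer pair exercised above backs the public `Dataset.to_parquet`
# and `Dataset.from_parquet` helpers; a minimal round trip, assuming only a
# writable `tmp_path`, looks like this:
def _parquet_roundtrip_demo(tmp_path):
    from datasets import Dataset

    ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2], "col_3": [0.1, 0.2]})
    ds.to_parquet(str(tmp_path / "demo.parquet"))  # written via pyarrow under the hood
    reloaded = Dataset.from_parquet(str(tmp_path / "demo.parquet"))
    assert reloaded.column_names == ["col_1", "col_2", "col_3"]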
@pytest.mark.parametrize(
'''feature, expected''' , [
(Features({'''foo''': Value('''int32''' )} ), None),
(Features({'''image''': Image(), '''foo''': Value('''int32''' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({'''nested''': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected | 256 |
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class BarkProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"
    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)
        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname, self.speaker_embeddings_dict_path, bos_token="(BOS)", eos_token="(EOS)"
        )
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }
        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())
        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())
        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)
    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)
        encoded_processor = processor(text=self.input_string)
        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
| 356 | 0 |
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
    from .dual_transformer_2d import DualTransformer2DModel
    from .modeling_utils import ModelMixin
    from .prior_transformer import PriorTransformer
    from .t5_film_transformer import T5FilmDecoder
    from .transformer_2d import Transformer2DModel
    from .unet_1d import UNet1DModel
    from .unet_2d import UNet2DModel
    from .unet_2d_condition import UNet2DConditionModel
    from .unet_3d_condition import UNet3DConditionModel
    from .vq_model import VQModel

if is_flax_available():
    from .controlnet_flax import FlaxControlNetModel
    from .unet_2d_condition_flax import FlaxUNet2DConditionModel
from .vae_flax import FlaxAutoencoderKL
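# --- Illustrative note (not part of the original module) ---
# The availability guards above keep this package importable when only one
# backend is installed; callers can probe the same flags before touching a class:
#
#     from diffusers.utils import is_torch_available
#     if is_torch_available():
#         from diffusers.models import UNet2DConditionModel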
| 719 |
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)

ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}
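# --- Illustrative sketch (not part of the original module) ---
# ORT_TO_NP_TYPE translates onnxruntime's type strings into numpy dtypes, e.g.
# to cast model outputs; `sess` below is a hypothetical ort.InferenceSession:
#
#     out_meta = sess.get_outputs()[0]
#     np_dtype = ORT_TO_NP_TYPE[out_meta.type]  # "tensor(float)" -> np.float32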
class OnnxRuntimeModel:
    def __init__(self, model=None, **kwargs):
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir", None)
        self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path: Union[str, Path], provider=None, sess_options=None):
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            provider = "CPUExecutionProvider"
        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)
    def _save_pretrained(self, save_directory: Union[str, Path], file_name: Optional[str] = None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass

    def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs):
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return
        os.makedirs(save_directory, exist_ok=True)
        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)
    @classmethod
    def _from_pretrained(
        cls, model_id: Union[str, Path], use_auth_token: Optional[Union[bool, str, None]] = None,
        revision: Optional[Union[str, None]] = None, force_download: bool = False,
        cache_dir: Optional[str] = None, file_name: Optional[str] = None,
        provider: Optional[str] = None, sess_options: Optional["ort.SessionOptions"] = None, **kwargs,
    ):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id, filename=model_file_name, use_auth_token=use_auth_token,
                revision=revision, cache_dir=cache_dir, force_download=force_download,
            )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)

    @classmethod
    def from_pretrained(
        cls, model_id: Union[str, Path], force_download: bool = True, use_auth_token: Optional[str] = None,
        cache_dir: Optional[str] = None, **model_kwargs,
    ):
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = model_id.split("@")

        return cls._from_pretrained(
            model_id=model_id, revision=revision, cache_dir=cache_dir,
            force_download=force_download, use_auth_token=use_auth_token, **model_kwargs,
        )
| 453 | 0 |
"""simple docstring"""
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LxmertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LxmertTokenizer
    rust_tokenizer_class = LxmertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
| 409 |
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class Seq2seqTrainerTester(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128

        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")

        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))

        batch_size = 4
        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask

            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions

            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)

            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)

            return {"accuracy": accuracy}
        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        train_dataset.set_format(
            type="torch", columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        val_dataset.set_format(
            type="torch", columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        output_dir = self.get_auto_remove_tmp_dir()
        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir, per_device_train_batch_size=batch_size, per_device_eval_batch_size=batch_size,
            predict_with_generate=True, evaluation_strategy="steps", do_train=True, do_eval=True,
            warmup_steps=0, eval_steps=2, logging_steps=2,
        )

        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert, args=training_args, compute_metrics=_compute_metrics,
            train_dataset=train_dataset, eval_dataset=val_dataset, tokenizer=tokenizer,
        )

        # start training
        trainer.train()
| 464 | 0 |
'''simple docstring'''
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Dict = False, False, False
@dataclass
class Audio:
    sampling_rate: Optional[int] = None
    mono: bool = True
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Audio", init=False, repr=False)

    def __call__(self):
        return self.pa_type

    def encode_example(self, value) -> dict:
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError('To support encoding audio data, please install \'soundfile\'.' ) from err
        if isinstance(value, str):
            return {"bytes": None, "path": value}
        elif isinstance(value, bytes):
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer, value["array"], value["sampling_rate"], format="wav")
            return {"bytes": buffer.getvalue(), "path": None}
elif value.get('path' ) is not None and os.path.isfile(value['path'] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith('pcm' ):
# "PCM" only has raw audio bytes
if value.get('sampling_rate' ) is None:
# At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
raise KeyError('To use PCM files, please specify a \'sampling_rate\' in Audio object' )
                if value.get("bytes"):
                    # If we already have PCM bytes, we don't have to "read file, make bytes" (just use them!)
                    bytes_value = np.frombuffer(value["bytes"], dtype=np.int16).astype(np.float32) / 32767
                else:
                    bytes_value = np.memmap(value["path"], dtype="h", mode="r").astype(np.float32) / 32767
                buffer = BytesIO(bytes())
                sf.write(buffer, bytes_value, value["sampling_rate"], format="wav")
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get('path' )}
elif value.get('bytes' ) is not None or value.get('path' ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get('bytes' ), "path": value.get('path' )}
else:
raise ValueError(
F'''An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
    def decode_example(self, value: dict, token_per_repo_id=None) -> dict:
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.")

        path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None)
        if path is None and file is None:
            raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError('To support decoding audio files, please install \'librosa\' and \'soundfile\'.' ) from err
        audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
'Decoding \'opus\' files requires system library \'libsndfile\'>=1.0.31, '
'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ' )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
'Decoding \'mp3\' files requires system library \'libsndfile\'>=1.1.0, '
'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ' )
        if file is None:
            token_per_repo_id = token_per_repo_id or {}
            source_url = path.split("::")[-1]
            try:
                repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                use_auth_token = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                use_auth_token = None

            with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                array, sampling_rate = sf.read(f)
        else:
            array, sampling_rate = sf.read(file)

        array = array.T
        if self.mono:
            array = librosa.to_mono(array)
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
            sampling_rate = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value

        if self.decode:
            raise ValueError("Cannot flatten a decoded Audio feature.")
return {
"bytes": Value('binary' ),
"path": Value('string' ),
}
    def cast_storage(self, storage: pa.StructArray) -> pa.StructArray:
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"):
            storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()])
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
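
# --- Illustrative sketch (not part of the original module) ---
# Declaring the Audio feature decodes string paths to arrays on access; the
# file name below is a placeholder:
#
#     from datasets import Dataset, Audio
#     ds = Dataset.from_dict({"audio": ["clip.wav"]}).cast_column("audio", Audio(sampling_rate=16_000))
#     ds[0]["audio"]  # -> {"path": ..., "array": np.ndarray, "sampling_rate": 16000}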
| 424 |
'''simple docstring'''
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None

    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_temp)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)

        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state

    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state
if __name__ == "__main__":

    def test_f1(x, y):
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_min.score()}"
    )

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_max = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_max.score()}"
    )

    def test_f2(x, y):
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        "The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_min.score()}"
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_max = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        "The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_max.score()}"
    )
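
# --- Worked example (not part of the original script) ---
# The acceptance rule keeps a worsening neighbor with probability
# p = e ** (change / current_temp). For change = -2 at current_temp = 100,
# p = e ** -0.02 ≈ 0.98, so hot iterations accept almost any move; at
# current_temp = 1 the same move only survives with p = e ** -2 ≈ 0.135.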
| 424 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
CHAT_MESSAGE_PROMPT = "\nHuman: <<task>>\n\nAssistant: "

DEFAULT_PROMPTS_REPO = "huggingface-tools/default-prompts"
PROMPT_FILES = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}


def download_prompt(prompt_or_repo_id, agent_name, mode="run"):
    """
    Downloads and caches the prompt from a repo and returns its contents (if necessary).
    """
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO

    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s", prompt_or_repo_id) is not None:
        return prompt_or_repo_id

    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name}
    )
    with open(prompt_file, "r", encoding="utf-8") as f:
        return f.read()
| 393 |
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot_small-90M": 512}


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
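
# Illustrative example (not in the original file): for the end-of-word marked
# tuple ("l", "o", "w", "</w>"), get_pairs returns
# {("l", "o"), ("o", "w"), ("w", "</w>")} -- the candidate merges that bpe()
# below ranks against self.bpe_ranks.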
class BlenderbotSmallTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="__start__",
        eos_token="__end__",
        unk_token="__unk__",
        pad_token="__null__",
        **kwargs,
    ):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        token = re.sub("([.,!?()])", r" \1", token)
        token = re.sub("(')", r" \1 ", token)
        token = re.sub(r"\s{2,}", " ", token)
        if "\n" in token:
            token = token.replace("\n", " __newln__")

        tokens = token.split(" ")
        words = []
        for token in tokens:
            if not len(token):
                continue

            token = token.lower()
            word = tuple(token)
            word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
            pairs = get_pairs(word)

            if not pairs:
                words.append(token)
                continue

            while True:
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0

                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break

                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1
                new_word = tuple(new_word)
                word = new_word
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = "@@ ".join(word)
            word = word[:-4]
            self.cache[token] = word
            words.append(word)
        return " ".join(words)
    def _tokenize(self, text: str) -> List[str]:
        """Split a string into tokens using BPE."""
        split_tokens = []

        words = re.findall(r"\S+\n?", text)

        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token to an id using the vocab."""
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) into a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens into a single string."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
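
# --- Illustrative usage (not part of the original module) ---
# The checkpoint name comes from the pretrained maps above:
#
#     from transformers import BlenderbotSmallTokenizer
#     tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot_small-90M")
#     tok.decode(tok("sample text").input_ids)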
| 487 | 0 |
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)
    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1 - µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1, (µA(x) + µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = max[0, (µA(x) - µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title("""Young""")
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title("""Middle aged""")
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title("""union""")
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title("""intersection""")
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title("""complement_a""")
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title("""difference a/b""")
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title("""alg_sum""")
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title("""alg_product""")
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title("""bdd_sum""")
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title("""bdd_difference""")
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 712 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tanreinama/GPTSAN-2.8B-spout_is_uniform": (
        "https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"
    ),
}


class GPTSanJapaneseConfig(PretrainedConfig):
    model_type = "gptsan-japanese"
    keys_to_ignore_at_inference = [
        "past_key_values",
    ]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(
        self,
        vocab_size=36000,
        max_position_embeddings=1280,
        d_model=1024,
        d_ff=8192,
        d_ext=4096,
        d_spout=128,
        num_switch_layers=10,
        num_ext_layers=0,
        num_heads=16,
        num_experts=16,
        expert_capacity=128,
        dropout_rate=0.0,
        layer_norm_epsilon=1e-5,
        router_bias=False,
        router_jitter_noise=0.0,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        output_hidden_states=False,
        output_attentions=False,
        initializer_factor=0.002,
        output_router_logits=False,
        use_cache=True,
        separator_token_id=35998,
        pad_token_id=35995,
        eos_token_id=35999,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.d_ff = d_ff
        self.d_ext = d_ext
        self.d_spout = d_spout
        self.num_switch_layers = num_switch_layers
        self.num_ext_layers = num_ext_layers
        self.num_layers = num_switch_layers + num_ext_layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.output_hidden_states = output_hidden_states
        self.output_attentions = output_attentions
        self.initializer_factor = initializer_factor
        self.output_router_logits = output_router_logits
        self.use_cache = use_cache

        super().__init__(separator_token_id=separator_token_id, pad_token_id=pad_token_id, eos_token_id=eos_token_id, **kwargs)
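
# --- Illustrative usage (not part of the original module) ---
# Defaults can be overridden like any PretrainedConfig subclass; attribute_map
# lets the generic name resolve to the model-specific one:
#
#     config = GPTSanJapaneseConfig(d_model=512, num_switch_layers=4)
#     config.hidden_size  # -> 512, aliased to d_model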
| 550 | 0 |
'''simple docstring'''
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """
    Helper function parsing the command line options.
    """
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
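
# --- Illustrative invocation (not part of the original script) ---
# Everything after the training script path is forwarded to it unchanged;
# the script name below is a placeholder:
#
#     python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased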
| 41 | """simple docstring"""
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class PriorTransformerTests(ModelTesterMixin, unittest.TestCase):
    model_class = PriorTransformer
    main_input_name = "hidden_states"

    @property
    def dummy_input(self):
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7

        hidden_states = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = floats_tensor((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def get_dummy_seed_input(self, seed=0):
        torch.manual_seed(seed)
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7

        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }
    @property
    def input_shape(self):
        return (4, 8)

    @property
    def output_shape(self):
        return (4, 8)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "num_attention_heads": 2,
            "attention_head_dim": 4,
            "num_layers": 2,
            "embedding_dim": 8,
            "num_embeddings": 7,
            "additional_embeddings": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_from_pretrained_hub(self):
        model, loading_info = PriorTransformer.from_pretrained(
            "hf-internal-testing/prior-dummy", output_loading_info=True
        )
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        hidden_states = model(**self.dummy_input)[0]

        assert hidden_states is not None, "Make sure output is not None"

    def test_forward_signature(self):
        init_dict, _ = self.prepare_init_args_and_inputs_for_common()

        model = self.model_class(**init_dict)
        signature = inspect.signature(model.forward)
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        arg_names = [*signature.parameters.keys()]

        expected_arg_names = ["hidden_states", "timestep"]
        self.assertListEqual(arg_names[:2], expected_arg_names)

    def test_output_pretrained(self):
        model = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy")
        model = model.to(torch_device)

        if hasattr(model, "set_default_attn_processor"):
            model.set_default_attn_processor()

        input = self.get_dummy_seed_input()

        with torch.no_grad():
            output = model(**input)[0]

        output_slice = output[0, :5].flatten().cpu()
        print(output_slice)

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        expected_output_slice = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239])
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
@slow
class PriorTransformerIntegrationTests(unittest.TestCase):
    def get_dummy_seed_input(self, batch_size=1, embedding_dim=768, num_embeddings=77, seed=0):
        torch.manual_seed(seed)

        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]],
            [37, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]],
            # fmt: on
        ]
    )
    def test_kandinsky_prior(self, seed, expected_slice):
        model = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior", subfolder="prior")
        model.to(torch_device)
        input = self.get_dummy_seed_input(seed=seed)

        with torch.no_grad():
            sample = model(**input)[0]

        assert list(sample.shape) == [1, 768]

        output_slice = sample[0, :8].flatten().cpu()
        print(output_slice)
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
| 473 | 0 |
'''simple docstring'''
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class OptimizationFTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def testGradientAccumulator(self):
        accumulator = GradientAccumulator()
        accumulator([tf.constant([1.0, 2.0])])
        accumulator([tf.constant([-2.0, 1.0])])
        accumulator([tf.constant([-1.0, 2.0])])
        with self.assertRaises(ValueError):
            accumulator([tf.constant([1.0, 1.0]), tf.constant([2.0, 2.0])])
        self.assertEqual(accumulator.step, 3)
        self.assertEqual(len(accumulator.gradients), 1)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [-2.0, 5.0], tol=1e-2)
        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [0.0, 0.0], tol=1e-2)
    def testGradientAccumulatorDistributionStrategy(self):
        context._context = None
        ops.enable_eager_execution_internal()
        physical_devices = tf.config.list_physical_devices("CPU")
        if len(physical_devices) == 1:
            tf.config.set_logical_device_configuration(
                physical_devices[0], [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()]
            )
        devices = tf.config.list_logical_devices(device_type="CPU")
        strategy = tf.distribute.MirroredStrategy(devices=devices[:2])

        with strategy.scope():
            accumulator = GradientAccumulator()
            variable = tf.Variable([4.0, 3.0])
            optimizer, _ = create_optimizer(5e-5, 10, 5)
            gradient_placeholder = tf.Variable([0.0, 0.0], trainable=False)

        def accumulate_on_replica(gradient):
            accumulator([gradient])

        def apply_on_replica():
            optimizer.apply_gradients(list(zip(accumulator.gradients, [variable])))

        @tf.function
        def accumulate(grad1, grad2):
            with strategy.scope():
                local_variables = strategy.experimental_local_results(gradient_placeholder)
                local_variables[0].assign(grad1)
                local_variables[1].assign(grad2)
                strategy.run(accumulate_on_replica, args=(gradient_placeholder,))

        @tf.function
        def apply_grad():
            with strategy.scope():
                strategy.run(apply_on_replica)

        def _check_local_values(grad1, grad2):
            values = strategy.experimental_local_results(accumulator._gradients[0])
            self.assertListAlmostEqual(values[0].value(), grad1, tol=1e-2)
            self.assertListAlmostEqual(values[1].value(), grad2, tol=1e-2)

        accumulate([1.0, 2.0], [-1.0, 1.0])
        accumulate([3.0, -1.0], [-1.0, -1.0])
        accumulate([-2.0, 2.0], [3.0, -2.0])
        self.assertEqual(accumulator.step, 3)
        _check_local_values([2.0, 3.0], [1.0, -2.0])

        apply_grad()
        self.assertListAlmostEqual(variable.value(), [4.0, 3.0], tol=1e-2)

        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        _check_local_values([0.0, 0.0], [0.0, 0.0])
| 126 |
'''simple docstring'''
class Node:
    # BST data structure
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val


def inorder(root, res):
    # Recursive traversal
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    # Build BST
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
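
# --- Note (not part of the original script) ---
# The BST is unbalanced, so tree_sort is O(n log n) on average but degrades to
# O(n^2) on sorted input: tree_sort([1, 2, 3, 4]) builds a right-leaning chain
# before the in-order walk reads the values back.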
| 126 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_convnext"] = ["ConvNextFeatureExtractor"]
    _import_structure["image_processing_convnext"] = ["ConvNextImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convnext"] = [
'''CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConvNextForImageClassification''',
'''ConvNextModel''',
'''ConvNextPreTrainedModel''',
'''ConvNextBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convnext"] = [
'''TFConvNextForImageClassification''',
'''TFConvNextModel''',
'''TFConvNextPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 7 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
    # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class ViTMAEConfig(PretrainedConfig):
    model_type = "vit_mae"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        decoder_num_attention_heads=16,
        decoder_hidden_size=512,
        decoder_num_hidden_layers=8,
        decoder_intermediate_size=2048,
        mask_ratio=0.75,
        norm_pix_loss=False,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss | 7 | 1 |
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        field: Optional[str] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir,
            keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs,
        )
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(cache_dir=cache_dir, data_files=path_or_paths, features=features, field=field, **kwargs)

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode,
                verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
class JsonDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_json_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = "utf-8"
        self.to_json_kwargs = to_json_kwargs

    def write(self) -> int:
        _ = self.to_json_kwargs.pop("path_or_buf", None)
        orient = self.to_json_kwargs.pop("orient", "records")
        lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False)
        index = self.to_json_kwargs.pop("index", False if orient in ["split", "table"] else True)
        compression = self.to_json_kwargs.pop("compression", None)

        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"`datasets` currently does not support {compression} compression")

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, "wb", compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f"The compression parameter is not supported when writing to a buffer, but compression={compression}"
                    " was passed. Please provide a local path instead."
                )
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs
            )
        return written
def UpperCAmelCase__ ( self , _lowercase ):
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : int = args
lowerCAmelCase_ : Union[str, Any] = query_table(
table=self.dataset.data , key=slice(_lowercase , offset + self.batch_size ) , indices=self.dataset._indices , )
lowerCAmelCase_ : str = batch.to_pandas().to_json(
path_or_buf=_lowercase , orient=_lowercase , lines=_lowercase , index=_lowercase , **_lowercase )
if not json_str.endswith("""\n""" ):
json_str += "\n"
return json_str.encode(self.encoding )
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , **_lowercase , ):
lowerCAmelCase_ : List[str] = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating json from Arrow format""" , ):
lowerCAmelCase_ : Tuple = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
written += file_obj.write(_lowercase )
else:
lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for json_str in logging.tqdm(
pool.imap(
self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , _lowercase , _lowercase )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating json from Arrow format""" , ):
written += file_obj.write(_lowercase )
return written
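# Hedged usage sketch for the reader/writer pair above: round-trip a tiny dataset
# through JSON Lines via the public `datasets` API, which delegates to these classes.
from datasets import Dataset, load_dataset

ds = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
ds.to_json("tiny.jsonl")  # orient="records", lines=True by default
reloaded = load_dataset("json", data_files="tiny.jsonl", split="train")
assert reloaded["text"] == ["a", "b"]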
| 440 |
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class AudioDiffusionPipeline(DiffusionPipeline):
    _optional_components = ["vqvae"]
def __init__( self , _lowercase , _lowercase , _lowercase , _lowercase , ):
super().__init__()
self.register_modules(unet=_lowercase , scheduler=_lowercase , mel=_lowercase , vqvae=_lowercase )
def UpperCAmelCase__ ( self ):
return 50 if isinstance(self.scheduler, DDIMScheduler) else 1000
@torch.no_grad()
def __call__( self , _lowercase = 1 , _lowercase = None , _lowercase = None , _lowercase = 0 , _lowercase = 0 , _lowercase = None , _lowercase = None , _lowercase = 0 , _lowercase = 0 , _lowercase = None , _lowercase = 0 , _lowercase = None , _lowercase = None , _lowercase=True , ):
lowerCAmelCase_ : Tuple = steps or self.get_default_steps()
self.scheduler.set_timesteps(_lowercase )
lowerCAmelCase_ : Union[str, Any] = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
lowerCAmelCase_ : Optional[Any] = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
lowerCAmelCase_ : Tuple = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=_lowercase , device=self.device , )
lowerCAmelCase_ : List[str] = noise
lowerCAmelCase_ : List[str] = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(_lowercase , _lowercase )
lowerCAmelCase_ : Tuple = self.mel.audio_slice_to_image(_lowercase )
lowerCAmelCase_ : List[Any] = np.frombuffer(input_image.tobytes() , dtype="""uint8""" ).reshape(
(input_image.height, input_image.width) )
lowerCAmelCase_ : Optional[Any] = (input_image / 255) * 2 - 1
lowerCAmelCase_ : Optional[Any] = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
lowerCAmelCase_ : List[Any] = self.vqvae.encode(torch.unsqueeze(_lowercase , 0 ) ).latent_dist.sample(
generator=_lowercase )[0]
lowerCAmelCase_ : str = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
lowerCAmelCase_ : str = self.scheduler.add_noise(_lowercase , _lowercase , self.scheduler.timesteps[start_step - 1] )
lowerCAmelCase_ : Dict = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
lowerCAmelCase_ : Union[str, Any] = int(mask_start_secs * pixels_per_second )
lowerCAmelCase_ : str = int(mask_end_secs * pixels_per_second )
lowerCAmelCase_ : List[Any] = self.scheduler.add_noise(_lowercase , _lowercase , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet, UNet2DConditionModel):
lowerCAmelCase_ : List[Any] = self.unet(_lowercase , _lowercase , _lowercase )["""sample"""]
else:
lowerCAmelCase_ : Any = self.unet(_lowercase , _lowercase )["""sample"""]
if isinstance(self.scheduler, DDIMScheduler):
lowerCAmelCase_ : str = self.scheduler.step(
model_output=_lowercase , timestep=_lowercase , sample=_lowercase , eta=_lowercase , generator=_lowercase , )["""prev_sample"""]
else:
lowerCAmelCase_ : List[str] = self.scheduler.step(
model_output=_lowercase , timestep=_lowercase , sample=_lowercase , generator=_lowercase , )["""prev_sample"""]
if mask is not None:
if mask_start > 0:
lowerCAmelCase_ : Optional[Any] = mask[:, step, :, :mask_start]
if mask_end > 0:
lowerCAmelCase_ : Optional[Any] = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
lowerCAmelCase_ : Dict = 1 / self.vqvae.config.scaling_factor * images
lowerCAmelCase_ : Dict = self.vqvae.decode(_lowercase )["""sample"""]
lowerCAmelCase_ : List[Any] = (images / 2 + 0.5).clamp(0 , 1 )
lowerCAmelCase_ : List[Any] = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
lowerCAmelCase_ : Optional[Any] = (images * 255).round().astype("""uint8""" )
lowerCAmelCase_ : str = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(_lowercase , mode="""RGB""" ).convert("""L""" ) for _ in images) )
lowerCAmelCase_ : str = [self.mel.image_to_audio(_lowercase ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(_lowercase )[:, np.newaxis, :] ) , **ImagePipelineOutput(_lowercase ) )
@torch.no_grad()
def UpperCAmelCase__ ( self , _lowercase , _lowercase = 50 ):
assert isinstance(self.scheduler, DDIMScheduler)
self.scheduler.set_timesteps(_lowercase )
lowerCAmelCase_ : List[Any] = np.array(
[np.frombuffer(image.tobytes() , dtype="""uint8""" ).reshape((1, image.height, image.width) ) for image in images] )
lowerCAmelCase_ : List[str] = (sample / 255) * 2 - 1
lowerCAmelCase_ : Optional[Any] = torch.Tensor(_lowercase ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
lowerCAmelCase_ : int = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
lowerCAmelCase_ : Optional[int] = self.scheduler.alphas_cumprod[t]
lowerCAmelCase_ : Any = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
lowerCAmelCase_ : Optional[int] = 1 - alpha_prod_t
lowerCAmelCase_ : Union[str, Any] = self.unet(_lowercase , _lowercase )["""sample"""]
lowerCAmelCase_ : int = (1 - alpha_prod_t_prev) ** 0.5 * model_output
lowerCAmelCase_ : Dict = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
lowerCAmelCase_ : Optional[Any] = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def UpperCAmelCase__ ( _lowercase , _lowercase , _lowercase ):
lowerCAmelCase_ : Optional[int] = acos(torch.dot(torch.flatten(_lowercase ) , torch.flatten(_lowercase ) ) / torch.norm(_lowercase ) / torch.norm(_lowercase ) )
return sin((1 - alpha) * theta ) * xa / sin(_lowercase ) + sin(alpha * theta ) * xa / sin(_lowercase )
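# Standalone sketch of the slerp helper above: spherical interpolation between
# two tensors, checked on unit vectors where the midpoint is known analytically.
import torch
from math import acos, sin

def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
    theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
    return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)

a, b = torch.tensor([1.0, 0.0]), torch.tensor([0.0, 1.0])
print(slerp(a, b, 0.5))  # ~[0.7071, 0.7071], halfway along the arc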
| 440 | 1 |
def sum_digits(num: int) -> int:
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution(max_n: int = 100) -> int:
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator)


if __name__ == "__main__":
    print(f"{solution() = }")
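# Cross-check of the recurrence above with exact fractions. e's continued fraction
# is [2; 1, 2, 1, 1, 4, ...], and the numerator of the 10th convergent is 1457
# (digit sum 1 + 4 + 5 + 7 = 17), matching Project Euler 65's worked example.
from fractions import Fraction

def e_terms(n: int) -> list:
    terms = [2]
    for i in range(2, n + 1):
        terms.append(2 * i // 3 if i % 3 == 0 else 1)
    return terms

def convergent(terms: list) -> Fraction:
    value = Fraction(terms[-1])
    for term in reversed(terms[:-1]):
        value = term + 1 / value
    return value

assert convergent(e_terms(10)).numerator == 1457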
| 100 |
from math import factorial


def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    """Return the probability of `successes` successes in `trials` Bernoulli trials."""
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials")
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers")
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("the function is defined for non-negative integers")
    if not 0 < prob < 1:
        raise ValueError("prob has to be in range of 1 - 0")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / (k! * (n - k)!)
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print("Probability of 2 successes out of 4 trials")
    print("with probability of 0.75 is:", end=" ")
    print(binomial_distribution(2, 4, 0.75))
| 397 | 0 |
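# Quick sanity check of binomial_distribution above: a probability mass function
# must sum to 1 over all possible success counts.
assert abs(sum(binomial_distribution(k, 4, 0.75) for k in range(5)) - 1.0) < 1e-9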
import csv

import tweepy

# Twitter API credentials
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""


def get_all_tweets(screen_name: str) -> None:
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    # initialize a list to hold all the tweepy Tweets
    alltweets = []

    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)

    # save most recent tweets
    alltweets.extend(new_tweets)

    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1

    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")

        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)

        # save most recent tweets
        alltweets.extend(new_tweets)

        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(f"...{len(alltweets)} tweets downloaded so far")

    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]

    # write the csv
    with open(f"new_{screen_name}_tweets.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)


if __name__ == "__main__":
    # pass in the username of the account you want to download
    get_all_tweets("FirePing32")
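# A commonly used alternative to the manual max_id loop above is tweepy's Cursor,
# which handles the pagination bookkeeping itself. Sketch only, left commented out
# because it needs live credentials in `api`:
#
#   for status in tweepy.Cursor(api.user_timeline, screen_name="FirePing32", count=200).items():
#       print(status.id_str, status.created_at)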
| 198 |
from math import ceil, sqrt


def solution(limit: int = 1_000_000) -> int:
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1
        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
    return answer


if __name__ == "__main__":
    print(f"{solution() = }")
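# Brute-force cross-check of solution() for a small limit: enumerate every square
# lamina directly (outer square with a centred square hole of the same parity,
# hole at least 1x1, so tiles used = outer**2 - hole**2).
def brute_force(limit: int) -> int:
    count = 0
    outer = 3
    while 4 * outer - 4 <= limit:  # thinnest lamina of this outer width
        for hole in range(outer - 2, 0, -2):
            if outer * outer - hole * hole <= limit:
                count += 1
        outer += 1
    return count

assert brute_force(100) == solution(100)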
| 198 | 1 |
import gc
import random
import unittest
import torch
from diffusers import (
    IFImg2ImgPipeline,
    IFImg2ImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
def UpperCAmelCase__ ( self : Optional[int] )->Optional[int]:
'''simple docstring'''
return self._get_dummy_components()
def UpperCAmelCase__ ( self : Optional[int] , _snake_case : int , _snake_case : int=0 )->List[str]:
'''simple docstring'''
if str(_UpperCAmelCase ).startswith("""mps""" ):
__lowerCAmelCase : List[Any] = torch.manual_seed(_UpperCAmelCase )
else:
__lowerCAmelCase : Optional[Any] = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
__lowerCAmelCase : Union[str, Any] = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def UpperCAmelCase__ ( self : str )->Optional[int]:
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def UpperCAmelCase__ ( self : List[str] )->str:
'''simple docstring'''
super().test_save_load_floataa(expected_max_diff=1E-1 )
def UpperCAmelCase__ ( self : Dict )->int:
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def UpperCAmelCase__ ( self : Dict )->List[Any]:
'''simple docstring'''
self._test_save_load_local()
def UpperCAmelCase__ ( self : int )->str:
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def UpperCAmelCase__ ( self : int )->Tuple:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@slow
@require_torch_gpu
class IFPipelineSlowTests(unittest.TestCase):
def UpperCAmelCase__ ( self : Any )->Union[str, Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self : Dict )->Union[str, Any]:
'''simple docstring'''
__lowerCAmelCase : Optional[Any] = IFPipeline.from_pretrained("""DeepFloyd/IF-I-XL-v1.0""" , variant="""fp16""" , torch_dtype=torch.float16 )
__lowerCAmelCase : Tuple = IFSuperResolutionPipeline.from_pretrained(
"""DeepFloyd/IF-II-L-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa , text_encoder=_UpperCAmelCase , tokenizer=_UpperCAmelCase )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to("""cuda""" )
__lowerCAmelCase : Union[str, Any] = pipe_a.encode_prompt("""anime turtle""" , device="""cuda""" )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
__lowerCAmelCase : Optional[int] = None
__lowerCAmelCase : Optional[int] = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
__lowerCAmelCase : List[str] = IFImg2ImgPipeline(**pipe_a.components )
__lowerCAmelCase : Optional[int] = IFImg2ImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_img2img(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
__lowerCAmelCase : Union[str, Any] = IFInpaintingPipeline(**pipe_a.components )
__lowerCAmelCase : Optional[Any] = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def UpperCAmelCase__ ( self : Tuple , _snake_case : List[str] , _snake_case : List[str] , _snake_case : List[str] , _snake_case : Optional[int] )->List[str]:
'''simple docstring'''
_start_torch_memory_measurement()
__lowerCAmelCase : Dict = torch.Generator(device="""cpu""" ).manual_seed(0 )
__lowerCAmelCase : List[Any] = pipe_a(
prompt_embeds=_UpperCAmelCase , negative_prompt_embeds=_UpperCAmelCase , num_inference_steps=2 , generator=_UpperCAmelCase , output_type="""np""" , )
__lowerCAmelCase : int = output.images[0]
assert image.shape == (64, 64, 3)
__lowerCAmelCase : List[str] = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
__lowerCAmelCase : Optional[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy""" )
assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase )
# pipeline 2
_start_torch_memory_measurement()
__lowerCAmelCase : Union[str, Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )
__lowerCAmelCase : List[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_UpperCAmelCase )
__lowerCAmelCase : List[str] = pipe_a(
prompt_embeds=_UpperCAmelCase , negative_prompt_embeds=_UpperCAmelCase , image=_UpperCAmelCase , generator=_UpperCAmelCase , num_inference_steps=2 , output_type="""np""" , )
__lowerCAmelCase : Any = output.images[0]
assert image.shape == (256, 256, 3)
__lowerCAmelCase : Union[str, Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
__lowerCAmelCase : List[str] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase )
def UpperCAmelCase__ ( self : int , _snake_case : Dict , _snake_case : List[str] , _snake_case : Any , _snake_case : Optional[Any] )->Optional[Any]:
'''simple docstring'''
_start_torch_memory_measurement()
__lowerCAmelCase : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_UpperCAmelCase )
__lowerCAmelCase : Any = torch.Generator(device="""cpu""" ).manual_seed(0 )
__lowerCAmelCase : int = pipe_a(
prompt_embeds=_UpperCAmelCase , negative_prompt_embeds=_UpperCAmelCase , image=_UpperCAmelCase , num_inference_steps=2 , generator=_UpperCAmelCase , output_type="""np""" , )
__lowerCAmelCase : str = output.images[0]
assert image.shape == (64, 64, 3)
__lowerCAmelCase : Optional[int] = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
__lowerCAmelCase : Optional[int] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy""" )
assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase )
# pipeline 2
_start_torch_memory_measurement()
__lowerCAmelCase : List[Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )
__lowerCAmelCase : Any = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(_UpperCAmelCase )
__lowerCAmelCase : List[str] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_UpperCAmelCase )
__lowerCAmelCase : Optional[int] = pipe_a(
prompt_embeds=_UpperCAmelCase , negative_prompt_embeds=_UpperCAmelCase , image=_UpperCAmelCase , original_image=_UpperCAmelCase , generator=_UpperCAmelCase , num_inference_steps=2 , output_type="""np""" , )
__lowerCAmelCase : int = output.images[0]
assert image.shape == (256, 256, 3)
__lowerCAmelCase : Dict = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
__lowerCAmelCase : Union[str, Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase )
def UpperCAmelCase__ ( self : Dict , _snake_case : Any , _snake_case : Union[str, Any] , _snake_case : int , _snake_case : Optional[int] )->Optional[Any]:
'''simple docstring'''
_start_torch_memory_measurement()
__lowerCAmelCase : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_UpperCAmelCase )
__lowerCAmelCase : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(_UpperCAmelCase )
__lowerCAmelCase : Optional[int] = torch.Generator(device="""cpu""" ).manual_seed(0 )
__lowerCAmelCase : List[str] = pipe_a(
prompt_embeds=_UpperCAmelCase , negative_prompt_embeds=_UpperCAmelCase , image=_UpperCAmelCase , mask_image=_UpperCAmelCase , num_inference_steps=2 , generator=_UpperCAmelCase , output_type="""np""" , )
__lowerCAmelCase : Optional[int] = output.images[0]
assert image.shape == (64, 64, 3)
__lowerCAmelCase : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
__lowerCAmelCase : Any = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy""" )
assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase )
# pipeline 2
_start_torch_memory_measurement()
__lowerCAmelCase : Union[str, Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )
__lowerCAmelCase : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_UpperCAmelCase )
__lowerCAmelCase : Dict = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(_UpperCAmelCase )
__lowerCAmelCase : Optional[int] = floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(_UpperCAmelCase )
__lowerCAmelCase : Any = pipe_a(
prompt_embeds=_UpperCAmelCase , negative_prompt_embeds=_UpperCAmelCase , image=_UpperCAmelCase , mask_image=_UpperCAmelCase , original_image=_UpperCAmelCase , generator=_UpperCAmelCase , num_inference_steps=2 , output_type="""np""" , )
__lowerCAmelCase : Any = output.images[0]
assert image.shape == (256, 256, 3)
__lowerCAmelCase : List[str] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
__lowerCAmelCase : int = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase )
def _SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
| 504 |
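# The _start_torch_memory_measurement helper above resets CUDA counters so each
# stage's peak allocation can be read in isolation. A minimal standalone form of
# the same measurement pattern (requires a CUDA device at runtime):
import torch

def peak_gpu_bytes(fn) -> int:
    torch.cuda.empty_cache()
    torch.cuda.reset_peak_memory_stats()
    fn()
    return torch.cuda.max_memory_allocated()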
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusion2InpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
def __lowercase ( self : Dict ):
torch.manual_seed(0 )
_a : Optional[int] = UNet2DConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=9 ,out_channels=4 ,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') ,up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') ,cross_attention_dim=32 ,attention_head_dim=(2, 4) ,use_linear_projection=_UpperCAmelCase ,)
_a : Optional[int] = PNDMScheduler(skip_prk_steps=_UpperCAmelCase )
torch.manual_seed(0 )
_a : Dict = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] ,up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] ,latent_channels=4 ,sample_size=128 ,)
torch.manual_seed(0 )
_a : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,hidden_act='gelu' ,projection_dim=512 ,)
_a : str = CLIPTextModel(_UpperCAmelCase )
_a : Any = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
_a : Optional[Any] = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def __lowercase ( self : List[Any] ,_UpperCAmelCase : List[Any] ,_UpperCAmelCase : int=0 ):
# TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
_a : Any = floats_tensor((1, 3, 32, 32) ,rng=random.Random(_UpperCAmelCase ) ).to(_UpperCAmelCase )
_a : Any = image.cpu().permute(0 ,2 ,3 ,1 )[0]
_a : Optional[int] = Image.fromarray(np.uint8(_UpperCAmelCase ) ).convert('RGB' ).resize((64, 64) )
_a : Tuple = Image.fromarray(np.uint8(image + 4 ) ).convert('RGB' ).resize((64, 64) )
if str(_UpperCAmelCase ).startswith('mps' ):
_a : str = torch.manual_seed(_UpperCAmelCase )
else:
_a : Dict = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
_a : Any = {
'prompt': 'A painting of a squirrel eating a burger',
'image': init_image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __lowercase ( self : List[str] ):
_a : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator
_a : Dict = self.get_dummy_components()
_a : int = StableDiffusionInpaintPipeline(**_UpperCAmelCase )
_a : Any = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
_a : Dict = self.get_dummy_inputs(_UpperCAmelCase )
_a : Tuple = sd_pipe(**_UpperCAmelCase ).images
_a : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_a : List[Any] = np.array([0.47_27, 0.57_35, 0.39_41, 0.54_46, 0.59_26, 0.43_94, 0.50_62, 0.46_54, 0.44_76] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowercase ( self : List[Any] ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class __magic_name__ ( unittest.TestCase ):
def __lowercase ( self : List[str] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowercase ( self : int ):
_a : Tuple = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png' )
_a : Union[str, Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
_a : int = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
'/yellow_cat_sitting_on_a_park_bench.npy' )
_a : List[Any] = 'stabilityai/stable-diffusion-2-inpainting'
_a : Tuple = StableDiffusionInpaintPipeline.from_pretrained(_UpperCAmelCase ,safety_checker=_UpperCAmelCase )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing()
_a : int = 'Face of a yellow cat, high resolution, sitting on a park bench'
_a : Dict = torch.manual_seed(0 )
_a : Any = pipe(
prompt=_UpperCAmelCase ,image=_UpperCAmelCase ,mask_image=_UpperCAmelCase ,generator=_UpperCAmelCase ,output_type='np' ,)
_a : List[str] = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 9E-3
def __lowercase ( self : Tuple ):
_a : str = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png' )
_a : Optional[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
_a : int = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
'/yellow_cat_sitting_on_a_park_bench_fp16.npy' )
_a : int = 'stabilityai/stable-diffusion-2-inpainting'
_a : List[str] = StableDiffusionInpaintPipeline.from_pretrained(
_UpperCAmelCase ,torch_dtype=torch.float16 ,safety_checker=_UpperCAmelCase ,)
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing()
_a : Dict = 'Face of a yellow cat, high resolution, sitting on a park bench'
_a : int = torch.manual_seed(0 )
_a : Dict = pipe(
prompt=_UpperCAmelCase ,image=_UpperCAmelCase ,mask_image=_UpperCAmelCase ,generator=_UpperCAmelCase ,output_type='np' ,)
_a : str = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def __lowercase ( self : Tuple ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_a : Dict = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png' )
_a : Optional[int] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
_a : List[str] = 'stabilityai/stable-diffusion-2-inpainting'
_a : Any = PNDMScheduler.from_pretrained(_UpperCAmelCase ,subfolder='scheduler' )
_a : Optional[Any] = StableDiffusionInpaintPipeline.from_pretrained(
_UpperCAmelCase ,safety_checker=_UpperCAmelCase ,scheduler=_UpperCAmelCase ,torch_dtype=torch.float16 ,)
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
_a : Any = 'Face of a yellow cat, high resolution, sitting on a park bench'
_a : Optional[Any] = torch.manual_seed(0 )
_a : int = pipe(
prompt=_UpperCAmelCase ,image=_UpperCAmelCase ,mask_image=_UpperCAmelCase ,generator=_UpperCAmelCase ,num_inference_steps=2 ,output_type='np' ,)
_a : int = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 10**9
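# Minimal end-to-end sketch of the pipeline these tests exercise, using the model
# id and image URLs that appear above (downloads weights; needs a CUDA GPU).
import torch
from diffusers import StableDiffusionInpaintPipeline
from diffusers.utils import load_image

init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/init_image.png"
)
mask_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
)
pipe = StableDiffusionInpaintPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-inpainting", torch_dtype=torch.float16
).to("cuda")
image = pipe(
    prompt="Face of a yellow cat, high resolution, sitting on a park bench",
    image=init_image,
    mask_image=mask_image,
).images[0]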
| 358 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
UpperCamelCase_ = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["DeiTFeatureExtractor"]
UpperCamelCase_ = ["DeiTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deit"] = [
        "DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DeiTForImageClassification",
        "DeiTForImageClassificationWithTeacher",
        "DeiTForMaskedImageModeling",
        "DeiTModel",
        "DeiTPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deit"] = [
        "TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDeiTForImageClassification",
        "TFDeiTForImageClassificationWithTeacher",
        "TFDeiTForMaskedImageModeling",
        "TFDeiTModel",
        "TFDeiTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
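# The block above is the standard transformers lazy-import layout: heavy submodules
# are only imported when one of their names is first accessed. A stripped-down
# illustration of the underlying idea (not the _LazyModule internals) looks like:
import importlib
import types

class TinyLazyModule(types.ModuleType):
    def __init__(self, name: str, import_map: dict):
        super().__init__(name)
        self._import_map = import_map  # attribute name -> module path

    def __getattr__(self, attr: str):
        module = importlib.import_module(self._import_map[attr])
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so the import runs only once
        return value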
| 376 |
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class a ( unittest.TestCase ):
def UpperCAmelCase__ ( self : Tuple , snake_case__ : int , snake_case__ : int ):
"""simple docstring"""
__lowerCAmelCase = jnp.ones((batch_size, length) ) / length
return scores
def UpperCAmelCase__ ( self : Dict ):
"""simple docstring"""
__lowerCAmelCase = None
__lowerCAmelCase = 20
__lowerCAmelCase = self._get_uniform_logits(batch_size=2 , length=snake_case__ )
# tweak scores to not be uniform anymore
__lowerCAmelCase = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
__lowerCAmelCase = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
__lowerCAmelCase = jax.nn.softmax(snake_case__ , axis=-1 )
__lowerCAmelCase = FlaxTemperatureLogitsWarper(temperature=0.5 )
__lowerCAmelCase = FlaxTemperatureLogitsWarper(temperature=1.3 )
__lowerCAmelCase = jax.nn.softmax(temp_dist_warper_sharper(snake_case__ , scores.copy() , cur_len=snake_case__ ) , axis=-1 )
__lowerCAmelCase = jax.nn.softmax(temp_dist_warper_smoother(snake_case__ , scores.copy() , cur_len=snake_case__ ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1E-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1E-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def UpperCAmelCase__ ( self : str ):
"""simple docstring"""
__lowerCAmelCase = None
__lowerCAmelCase = 10
__lowerCAmelCase = 2
# create ramp distribution
__lowerCAmelCase = np.broadcast_to(np.arange(snake_case__ )[None, :] , (batch_size, vocab_size) ).copy()
__lowerCAmelCase = ramp_logits[1:, : vocab_size // 2] + vocab_size
__lowerCAmelCase = FlaxTopKLogitsWarper(3 )
__lowerCAmelCase = top_k_warp(snake_case__ , snake_case__ , cur_len=snake_case__ )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
__lowerCAmelCase = 5
__lowerCAmelCase = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
__lowerCAmelCase = np.broadcast_to(np.arange(snake_case__ )[None, :] , (batch_size, length) ).copy()
__lowerCAmelCase = top_k_warp_safety_check(snake_case__ , snake_case__ , cur_len=snake_case__ )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def UpperCAmelCase__ ( self : int ):
"""simple docstring"""
__lowerCAmelCase = None
__lowerCAmelCase = 10
__lowerCAmelCase = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
__lowerCAmelCase = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.1_5, 0.3, 0.3, 0.2_5]] ) )
__lowerCAmelCase = FlaxTopPLogitsWarper(0.8 )
__lowerCAmelCase = np.exp(top_p_warp(snake_case__ , snake_case__ , cur_len=snake_case__ ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
__lowerCAmelCase = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.2_5]] )
self.assertTrue(np.allclose(snake_case__ , snake_case__ , atol=1E-3 ) )
# check edge cases with negative and extreme logits
__lowerCAmelCase = np.broadcast_to(np.arange(snake_case__ )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
__lowerCAmelCase = ramp_logits[1] * 1_0_0.0
# make sure at least 2 tokens are kept
__lowerCAmelCase = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
__lowerCAmelCase = top_p_warp(snake_case__ , snake_case__ , cur_len=snake_case__ )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def UpperCAmelCase__ ( self : Dict ):
"""simple docstring"""
__lowerCAmelCase = 20
__lowerCAmelCase = 4
__lowerCAmelCase = 0
__lowerCAmelCase = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=snake_case__ )
# check that min length is applied at length 5
__lowerCAmelCase = ids_tensor((batch_size, 20) , vocab_size=20 )
__lowerCAmelCase = 5
__lowerCAmelCase = self._get_uniform_logits(snake_case__ , snake_case__ )
__lowerCAmelCase = min_dist_processor(snake_case__ , snake_case__ , cur_len=snake_case__ )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float("inf" )] )
# check that min length is not applied anymore at length 15
__lowerCAmelCase = self._get_uniform_logits(snake_case__ , snake_case__ )
__lowerCAmelCase = 15
__lowerCAmelCase = min_dist_processor(snake_case__ , snake_case__ , cur_len=snake_case__ )
self.assertFalse(jnp.isinf(snake_case__ ).any() )
def UpperCAmelCase__ ( self : int ):
"""simple docstring"""
__lowerCAmelCase = 20
__lowerCAmelCase = 4
__lowerCAmelCase = 0
__lowerCAmelCase = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=snake_case__ )
# check that all scores are -inf except the bos_token_id score
__lowerCAmelCase = ids_tensor((batch_size, 1) , vocab_size=20 )
__lowerCAmelCase = 1
__lowerCAmelCase = self._get_uniform_logits(snake_case__ , snake_case__ )
__lowerCAmelCase = logits_processor(snake_case__ , snake_case__ , cur_len=snake_case__ )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id shold be zero
# check that bos_token_id is not forced if current length is greater than 1
__lowerCAmelCase = 3
__lowerCAmelCase = self._get_uniform_logits(snake_case__ , snake_case__ )
__lowerCAmelCase = logits_processor(snake_case__ , snake_case__ , cur_len=snake_case__ )
self.assertFalse(jnp.isinf(snake_case__ ).any() )
def UpperCAmelCase__ ( self : str ):
"""simple docstring"""
__lowerCAmelCase = 20
__lowerCAmelCase = 4
__lowerCAmelCase = 0
__lowerCAmelCase = 5
__lowerCAmelCase = FlaxForcedEOSTokenLogitsProcessor(max_length=snake_case__ , eos_token_id=snake_case__ )
# check that all scores are -inf except the eos_token_id when max_length is reached
__lowerCAmelCase = ids_tensor((batch_size, 4) , vocab_size=20 )
__lowerCAmelCase = 4
__lowerCAmelCase = self._get_uniform_logits(snake_case__ , snake_case__ )
__lowerCAmelCase = logits_processor(snake_case__ , snake_case__ , cur_len=snake_case__ )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
__lowerCAmelCase = 3
__lowerCAmelCase = self._get_uniform_logits(snake_case__ , snake_case__ )
__lowerCAmelCase = logits_processor(snake_case__ , snake_case__ , cur_len=snake_case__ )
self.assertFalse(jnp.isinf(snake_case__ ).any() )
def UpperCAmelCase__ ( self : Dict ):
"""simple docstring"""
__lowerCAmelCase = 4
__lowerCAmelCase = 10
__lowerCAmelCase = 15
__lowerCAmelCase = 2
__lowerCAmelCase = 1
__lowerCAmelCase = 15
# dummy input_ids and scores
__lowerCAmelCase = ids_tensor((batch_size, sequence_length) , snake_case__ )
__lowerCAmelCase = input_ids.copy()
__lowerCAmelCase = self._get_uniform_logits(snake_case__ , snake_case__ )
__lowerCAmelCase = scores.copy()
# instantiate all dist processors
__lowerCAmelCase = FlaxTemperatureLogitsWarper(temperature=0.5 )
__lowerCAmelCase = FlaxTopKLogitsWarper(3 )
__lowerCAmelCase = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
__lowerCAmelCase = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=snake_case__ )
__lowerCAmelCase = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=snake_case__ )
__lowerCAmelCase = FlaxForcedEOSTokenLogitsProcessor(max_length=snake_case__ , eos_token_id=snake_case__ )
__lowerCAmelCase = 10
# no processor list
__lowerCAmelCase = temp_dist_warp(snake_case__ , snake_case__ , cur_len=snake_case__ )
__lowerCAmelCase = top_k_warp(snake_case__ , snake_case__ , cur_len=snake_case__ )
__lowerCAmelCase = top_p_warp(snake_case__ , snake_case__ , cur_len=snake_case__ )
__lowerCAmelCase = min_dist_proc(snake_case__ , snake_case__ , cur_len=snake_case__ )
__lowerCAmelCase = bos_dist_proc(snake_case__ , snake_case__ , cur_len=snake_case__ )
__lowerCAmelCase = eos_dist_proc(snake_case__ , snake_case__ , cur_len=snake_case__ )
# with processor list
__lowerCAmelCase = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
__lowerCAmelCase = processor(snake_case__ , snake_case__ , cur_len=snake_case__ )
# scores should be equal
self.assertTrue(jnp.allclose(snake_case__ , snake_case__ , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def UpperCAmelCase__ ( self : Dict ):
"""simple docstring"""
__lowerCAmelCase = 4
__lowerCAmelCase = 10
__lowerCAmelCase = 15
__lowerCAmelCase = 2
__lowerCAmelCase = 1
__lowerCAmelCase = 15
# dummy input_ids and scores
__lowerCAmelCase = ids_tensor((batch_size, sequence_length) , snake_case__ )
__lowerCAmelCase = input_ids.copy()
__lowerCAmelCase = self._get_uniform_logits(snake_case__ , snake_case__ )
__lowerCAmelCase = scores.copy()
# instantiate all dist processors
__lowerCAmelCase = FlaxTemperatureLogitsWarper(temperature=0.5 )
__lowerCAmelCase = FlaxTopKLogitsWarper(3 )
__lowerCAmelCase = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
__lowerCAmelCase = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=snake_case__ )
__lowerCAmelCase = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=snake_case__ )
__lowerCAmelCase = FlaxForcedEOSTokenLogitsProcessor(max_length=snake_case__ , eos_token_id=snake_case__ )
__lowerCAmelCase = 10
# no processor list
def run_no_processor_list(snake_case__ : int , snake_case__ : Any , snake_case__ : int ):
__lowerCAmelCase = temp_dist_warp(snake_case__ , snake_case__ , cur_len=snake_case__ )
__lowerCAmelCase = top_k_warp(snake_case__ , snake_case__ , cur_len=snake_case__ )
__lowerCAmelCase = top_p_warp(snake_case__ , snake_case__ , cur_len=snake_case__ )
__lowerCAmelCase = min_dist_proc(snake_case__ , snake_case__ , cur_len=snake_case__ )
__lowerCAmelCase = bos_dist_proc(snake_case__ , snake_case__ , cur_len=snake_case__ )
__lowerCAmelCase = eos_dist_proc(snake_case__ , snake_case__ , cur_len=snake_case__ )
return scores
# with processor list
def run_processor_list(snake_case__ : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : str ):
__lowerCAmelCase = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
__lowerCAmelCase = processor(snake_case__ , snake_case__ , cur_len=snake_case__ )
return scores
__lowerCAmelCase = jax.jit(snake_case__ )
__lowerCAmelCase = jax.jit(snake_case__ )
__lowerCAmelCase = jitted_run_no_processor_list(snake_case__ , snake_case__ , snake_case__ )
__lowerCAmelCase = jitted_run_processor_list(snake_case__ , snake_case__ , snake_case__ )
# scores should be equal
self.assertTrue(jnp.allclose(snake_case__ , snake_case__ , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
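# Standalone sketch of the top-k filtering these tests exercise: keep the k
# largest logits per row and push everything else to -inf before sampling.
import jax.numpy as jnp

def top_k_filter(logits, k):
    threshold = jnp.sort(logits, axis=-1)[..., -k][..., None]
    return jnp.where(logits < threshold, -jnp.inf, logits)

scores = jnp.array([[0.1, 0.4, 0.2, 0.3]])
print(top_k_filter(scores, 2))  # only 0.4 and 0.3 remain finite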
| 376 | 1 |
class __lowercase :
"""simple docstring"""
def __init__( self , A ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase = size
lowerCamelCase = [0] * size
lowerCamelCase = [0] * size
@staticmethod
def __A ( A ) -> int:
'''simple docstring'''
return index | (index + 1)
@staticmethod
def __A ( A ) -> Tuple:
'''simple docstring'''
return (index & (index + 1)) - 1
def __A ( self , A , A ) -> Any:
'''simple docstring'''
lowerCamelCase = value
while index < self.size:
lowerCamelCase = self.get_prev(_lowerCAmelCase ) + 1
if current_left_border == index:
lowerCamelCase = value
else:
lowerCamelCase = max(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
lowerCamelCase = self.get_next(_lowerCAmelCase )
def __A ( self , A , A ) -> Dict:
'''simple docstring'''
right -= 1 # Because of right is exclusive
lowerCamelCase = 0
while left <= right:
lowerCamelCase = self.get_prev(_lowerCAmelCase )
if left <= current_left:
lowerCamelCase = max(_lowerCAmelCase , self.tree[right] )
lowerCamelCase = current_left
else:
lowerCamelCase = max(_lowerCAmelCase , self.arr[right] )
right -= 1
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
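# The class above is a Fenwick-style tree for range-maximum queries; its update
# step is difficult to verify as written, so here is a simpler, verifiably correct
# cousin: a prefix-max Fenwick tree. It assumes point updates only ever increase
# values, the common case in e.g. longest-increasing-subsequence DP.
class PrefixMaxFenwick:
    def __init__(self, size: int) -> None:
        self.size = size
        self.tree = [0] * (size + 1)  # 1-indexed internally

    def update(self, index: int, value: int) -> None:  # 0-indexed caller API
        index += 1
        while index <= self.size:
            self.tree[index] = max(self.tree[index], value)
            index += index & (-index)

    def query(self, index: int) -> int:  # max of values at positions 0..index
        index += 1
        best = 0
        while index > 0:
            best = max(best, self.tree[index])
            index -= index & (-index)
        return best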
| 457 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_groupvit': [
'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'GroupViTConfig',
'GroupViTOnnxConfig',
'GroupViTTextConfig',
'GroupViTVisionConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_groupvit"] = [
'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GroupViTModel',
'GroupViTPreTrainedModel',
'GroupViTTextModel',
'GroupViTVisionModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_groupvit"] = [
'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFGroupViTModel',
'TFGroupViTPreTrainedModel',
'TFGroupViTTextModel',
'TFGroupViTVisionModel',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 50 | 0 |
"""simple docstring"""
from jiwer import compute_measures
import datasets
_CITATION = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
_DESCRIPTION = '''\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
'''
_KWARGS_DESCRIPTION = '''
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> wer = datasets.load_metric("wer")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class WER(datasets.Metric):
    """Word error rate: (substitutions + deletions + insertions) / reference words."""

    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'predictions': datasets.Value('string' ,id='sequence' ),
'references': datasets.Value('string' ,id='sequence' ),
} ) ,codebase_urls=['https://github.com/jitsi/jiwer/'] ,reference_urls=[
'https://en.wikipedia.org/wiki/Word_error_rate',
] ,)
    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
| 22 |
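# Worked example for the metric above, matching its docstring: with one
# substitution out of four reference words, WER = (S + D + I) / (S + D + C) = 1/4.
from jiwer import compute_measures

measures = compute_measures("this is the reference", "this is the prediction")
wer = (measures["substitutions"] + measures["deletions"] + measures["insertions"]) / (
    measures["substitutions"] + measures["deletions"] + measures["hits"]
)
print(wer)  # 0.25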
"""simple docstring"""
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : Any ,A_ : Optional[Any] ,A_ : Tuple=13 ,A_ : Optional[Any]=7 ,A_ : Dict=True ,A_ : Optional[Any]=True ,A_ : str=True ,A_ : Union[str, Any]=True ,A_ : Optional[Any]=True ,A_ : Tuple=False ,A_ : Optional[int]=False ,A_ : str=False ,A_ : int=2 ,A_ : Union[str, Any]=99 ,A_ : int=0 ,A_ : Dict=32 ,A_ : List[str]=5 ,A_ : Any=4 ,A_ : str=0.1 ,A_ : Any=0.1 ,A_ : int=512 ,A_ : List[Any]=2 ,A_ : Union[str, Any]=0.02 ,A_ : Optional[Any]=2 ,A_ : List[str]=4 ,A_ : Optional[int]="last" ,A_ : str=True ,A_ : List[str]=None ,A_ : List[Any]=0 ,) -> int:
A = parent
A = batch_size
A = seq_length
A = is_training
A = use_input_lengths
A = use_token_type_ids
A = use_labels
A = gelu_activation
A = sinusoidal_embeddings
A = causal
A = asm
A = n_langs
A = vocab_size
A = n_special
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = max_position_embeddings
A = type_sequence_label_size
A = initializer_range
A = num_labels
A = num_choices
A = summary_type
A = use_proj
A = scope
A = bos_token_id
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Dict:
A = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
A = random_attention_mask([self.batch_size, self.seq_length] )
A = None
if self.use_input_lengths:
A = (
ids_tensor([self.batch_size] ,vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
A = None
if self.use_token_type_ids:
A = ids_tensor([self.batch_size, self.seq_length] ,self.n_langs )
A = None
A = None
A = None
if self.use_labels:
A = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
A = ids_tensor([self.batch_size] ,2 ).float()
A = ids_tensor([self.batch_size] ,self.num_choices )
A = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
return XLMConfig(
vocab_size=self.vocab_size ,n_special=self.n_special ,emb_dim=self.hidden_size ,n_layers=self.num_hidden_layers ,n_heads=self.num_attention_heads ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,gelu_activation=self.gelu_activation ,sinusoidal_embeddings=self.sinusoidal_embeddings ,asm=self.asm ,causal=self.causal ,n_langs=self.n_langs ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,summary_type=self.summary_type ,use_proj=self.use_proj ,num_labels=self.num_labels ,bos_token_id=self.bos_token_id ,)
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Optional[Any] ,A_ : List[str] ,A_ : int ,A_ : Dict ,A_ : Optional[Any] ,A_ : Optional[Any] ,A_ : Any ,A_ : List[str] ,A_ : Optional[int] ,) -> Tuple:
A = XLMModel(config=A_ )
model.to(A_ )
model.eval()
A = model(A_ ,lengths=A_ ,langs=A_ )
A = model(A_ ,langs=A_ )
A = model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _SCREAMING_SNAKE_CASE ( self : int ,A_ : str ,A_ : Union[str, Any] ,A_ : Any ,A_ : Any ,A_ : Any ,A_ : Union[str, Any] ,A_ : List[str] ,A_ : List[str] ,A_ : List[str] ,) -> Union[str, Any]:
A = XLMWithLMHeadModel(A_ )
model.to(A_ )
model.eval()
A = model(A_ ,token_type_ids=A_ ,labels=A_ )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Tuple ,A_ : Tuple ,A_ : str ,A_ : int ,A_ : str ,A_ : Optional[Any] ,A_ : Any ,A_ : Any ,A_ : Dict ,) -> List[str]:
A = XLMForQuestionAnsweringSimple(A_ )
model.to(A_ )
model.eval()
A = model(A_ )
A = model(A_ ,start_positions=A_ ,end_positions=A_ )
A = outputs
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[int] ,A_ : Dict ,A_ : Optional[Any] ,A_ : List[Any] ,A_ : List[str] ,A_ : List[Any] ,A_ : Optional[Any] ,A_ : str ,A_ : Any ,) -> Optional[int]:
A = XLMForQuestionAnswering(A_ )
model.to(A_ )
model.eval()
A = model(A_ )
A = model(
A_ ,start_positions=A_ ,end_positions=A_ ,cls_index=A_ ,is_impossible=A_ ,p_mask=A_ ,)
A = model(
A_ ,start_positions=A_ ,end_positions=A_ ,cls_index=A_ ,is_impossible=A_ ,)
((A) , ) = result_with_labels.to_tuple()
A = model(A_ ,start_positions=A_ ,end_positions=A_ )
((A) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape ,() )
self.parent.assertEqual(result.start_top_log_probs.shape ,(self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape ,(self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape ,(self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape ,(self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape ,(self.batch_size,) )
def _SCREAMING_SNAKE_CASE ( self : int ,A_ : Union[str, Any] ,A_ : Tuple ,A_ : int ,A_ : Union[str, Any] ,A_ : List[Any] ,A_ : Optional[Any] ,A_ : Tuple ,A_ : Union[str, Any] ,A_ : str ,) -> List[Any]:
A = XLMForSequenceClassification(A_ )
model.to(A_ )
model.eval()
A = model(A_ )
A = model(A_ ,labels=A_ )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Optional[Any] ,A_ : str ,A_ : Tuple ,A_ : List[str] ,A_ : Dict ,A_ : Dict ,A_ : Union[str, Any] ,A_ : Dict ,A_ : Any ,) -> Any:
A = self.num_labels
A = XLMForTokenClassification(A_ )
model.to(A_ )
model.eval()
A = model(A_ ,attention_mask=A_ ,labels=A_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : int ,A_ : str ,A_ : int ,A_ : Tuple ,A_ : List[Any] ,A_ : List[str] ,A_ : List[str] ,A_ : Optional[Any] ,A_ : int ,) -> Tuple:
A = self.num_choices
A = XLMForMultipleChoice(config=A_ )
model.to(A_ )
model.eval()
A = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
A = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
A = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
A = model(
A_ ,attention_mask=A_ ,token_type_ids=A_ ,labels=A_ ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
        return config, inputs_dict
@require_torch
class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = (
        (XLMWithLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
{
'''feature-extraction''': XLMModel,
'''fill-mask''': XLMWithLMHeadModel,
'''question-answering''': XLMForQuestionAnsweringSimple,
'''text-classification''': XLMForSequenceClassification,
'''text-generation''': XLMWithLMHeadModel,
'''token-classification''': XLMForTokenClassification,
'''zero-shot''': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def setUp(self):
        self.model_tester = XLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*A_ )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*A_ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*A_ )
def _SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*A_ )
def _SCREAMING_SNAKE_CASE ( self : int ) -> Dict:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*A_ )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Dict ,A_ : List[Any] ,A_ : List[Any] ,A_ : Dict ,A_ : Union[str, Any] ,A_ : List[Any]=False ,A_ : Tuple=1 ) -> List[Any]:
self.assertIsInstance(A_ ,A_ )
self.assertListEqual(
[isinstance(A_ ,A_ ) for iter_attentions in attentions] ,[True] * len(A_ ) )
self.assertEqual(len(A_ ) ,(max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(A_ ):
# adds PAD dummy token
A = min_length + idx + 1
A = min_length + idx + 1
A = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] ,[expected_shape] * len(A_ ) )
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Optional[Any] ,A_ : List[str] ,A_ : Dict ,A_ : Optional[Any] ,A_ : Dict ,A_ : Tuple=False ,A_ : Optional[Any]=1 ) -> List[str]:
self.assertIsInstance(A_ ,A_ )
self.assertListEqual(
[isinstance(A_ ,A_ ) for iter_hidden_states in hidden_states] ,[True] * len(A_ ) ,)
self.assertEqual(len(A_ ) ,(max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(A_ ):
# adds PAD dummy token
A = min_length + idx + 1
A = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] ,[expected_shape] * len(A_ ) ,)
pass
@slow
    def test_model_from_pretrained(self):
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xlm_mlm_en_2048(self):
        model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
        model.to(torch_device)
        input_ids = torch.tensor([[14, 447]], dtype=torch.long, device=torch_device)  # the president
        expected_output_ids = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids) | 22 | 1 |
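The integration test above pins greedy decoding to a fixed token sequence. A minimal stand-alone sketch of the same call path follows; the checkpoint name comes from the test, while the prompt string, max_length, and the use of AutoTokenizer are illustrative assumptions:

import torch
from transformers import AutoTokenizer, XLMWithLMHeadModel

tokenizer = AutoTokenizer.from_pretrained("xlm-mlm-en-2048")
model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048").eval()

input_ids = tokenizer("the president", return_tensors="pt").input_ids
with torch.no_grad():
    # do_sample=False gives deterministic greedy decoding, as in the test above
    output_ids = model.generate(input_ids, do_sample=False, max_length=20)
print(tokenizer.decode(output_ids[0]))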
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
UpperCAmelCase__ : int = logging.get_logger(__name__)
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :Union[str, Any] = ['pixel_values']
def __init__( self : Union[str, Any] , __magic_name__ : bool = True , __magic_name__ : Dict[str, int] = None , __magic_name__ : int = 0.9 , __magic_name__ : PILImageResampling = PILImageResampling.BICUBIC , __magic_name__ : bool = True , __magic_name__ : Dict[str, int] = None , __magic_name__ : Union[int, float] = 1 / 255 , __magic_name__ : bool = True , __magic_name__ : bool = True , __magic_name__ : Optional[Union[float, List[float]]] = None , __magic_name__ : Optional[Union[float, List[float]]] = None , **__magic_name__ : List[Any] , ):
"""simple docstring"""
super().__init__(**__magic_name__ )
lowerCAmelCase__ = size if size is not None else {"shortest_edge": 224}
lowerCAmelCase__ = get_size_dict(__magic_name__ , default_to_square=__magic_name__ )
lowerCAmelCase__ = crop_size if crop_size is not None else {"height": 224, "width": 224}
lowerCAmelCase__ = get_size_dict(__magic_name__ , param_name="crop_size" )
lowerCAmelCase__ = do_resize
lowerCAmelCase__ = size
lowerCAmelCase__ = crop_pct
lowerCAmelCase__ = resample
lowerCAmelCase__ = do_center_crop
lowerCAmelCase__ = crop_size
lowerCAmelCase__ = do_rescale
lowerCAmelCase__ = rescale_factor
lowerCAmelCase__ = do_normalize
lowerCAmelCase__ = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
lowerCAmelCase__ = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def __SCREAMING_SNAKE_CASE ( self : Any , __magic_name__ : np.ndarray , __magic_name__ : Dict[str, int] , __magic_name__ : Optional[float] = None , __magic_name__ : PILImageResampling = PILImageResampling.BICUBIC , __magic_name__ : Optional[Union[str, ChannelDimension]] = None , **__magic_name__ : Optional[int] , ):
"""simple docstring"""
lowerCAmelCase__ = get_size_dict(__magic_name__ , default_to_square=__magic_name__ )
if "shortest_edge" not in size and ("height" not in size or "width" not in size):
raise ValueError(f"""size must contain 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""" )
if crop_pct is not None:
if "shortest_edge" in size:
lowerCAmelCase__ = int(size["shortest_edge"] / crop_pct )
elif "height" in size and "width" in size:
if size["height"] == size["width"]:
lowerCAmelCase__ = int(size["height"] / crop_pct )
else:
lowerCAmelCase__ = (int(size["height"] / crop_pct ), int(size["width"] / crop_pct ))
else:
raise ValueError("Invalid size for resize: {}".format(__magic_name__ ) )
lowerCAmelCase__ = get_resize_output_image_size(__magic_name__ , size=__magic_name__ , default_to_square=__magic_name__ )
else:
if "shortest_edge" in size:
lowerCAmelCase__ = get_resize_output_image_size(__magic_name__ , size=size["shortest_edge"] , default_to_square=__magic_name__ )
elif "height" in size and "width" in size:
lowerCAmelCase__ = (size["height"], size["width"])
else:
raise ValueError("Invalid size for resize: {}".format(__magic_name__ ) )
return resize(__magic_name__ , size=__magic_name__ , resample=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Any , __magic_name__ : np.ndarray , __magic_name__ : Dict[str, int] , __magic_name__ : Optional[Union[str, ChannelDimension]] = None , **__magic_name__ : Union[str, Any] , ):
"""simple docstring"""
lowerCAmelCase__ = get_size_dict(__magic_name__ )
if "height" not in size or "width" not in size:
raise ValueError(f"""size must contain 'height' and 'width' as keys. Got {size.keys()}""" )
return center_crop(__magic_name__ , size=(size["height"], size["width"]) , data_format=__magic_name__ , **__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __magic_name__ : np.ndarray , __magic_name__ : Union[int, float] , __magic_name__ : Optional[Union[str, ChannelDimension]] = None , **__magic_name__ : str , ):
"""simple docstring"""
return rescale(__magic_name__ , scale=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : np.ndarray , __magic_name__ : Union[float, List[float]] , __magic_name__ : Union[float, List[float]] , __magic_name__ : Optional[Union[str, ChannelDimension]] = None , **__magic_name__ : Union[str, Any] , ):
"""simple docstring"""
return normalize(__magic_name__ , mean=__magic_name__ , std=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __magic_name__ : ImageInput , __magic_name__ : bool = None , __magic_name__ : Dict[str, int] = None , __magic_name__ : int = None , __magic_name__ : PILImageResampling = None , __magic_name__ : bool = None , __magic_name__ : Dict[str, int] = None , __magic_name__ : bool = None , __magic_name__ : float = None , __magic_name__ : bool = None , __magic_name__ : Optional[Union[float, List[float]]] = None , __magic_name__ : Optional[Union[float, List[float]]] = None , __magic_name__ : Optional[Union[str, TensorType]] = None , __magic_name__ : ChannelDimension = ChannelDimension.FIRST , **__magic_name__ : Any , ):
"""simple docstring"""
lowerCAmelCase__ = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase__ = crop_pct if crop_pct is not None else self.crop_pct
lowerCAmelCase__ = resample if resample is not None else self.resample
lowerCAmelCase__ = do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCAmelCase__ = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase__ = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCAmelCase__ = do_normalize if do_normalize is not None else self.do_normalize
lowerCAmelCase__ = image_mean if image_mean is not None else self.image_mean
lowerCAmelCase__ = image_std if image_std is not None else self.image_std
lowerCAmelCase__ = size if size is not None else self.size
lowerCAmelCase__ = get_size_dict(__magic_name__ , default_to_square=__magic_name__ )
lowerCAmelCase__ = crop_size if crop_size is not None else self.crop_size
lowerCAmelCase__ = get_size_dict(__magic_name__ , param_name="crop_size" )
lowerCAmelCase__ = make_list_of_images(__magic_name__ )
if not valid_images(__magic_name__ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_pct is None:
raise ValueError("Crop_pct must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
lowerCAmelCase__ = [to_numpy_array(__magic_name__ ) for image in images]
if do_resize:
lowerCAmelCase__ = [self.resize(image=__magic_name__ , size=__magic_name__ , crop_pct=__magic_name__ , resample=__magic_name__ ) for image in images]
if do_center_crop:
lowerCAmelCase__ = [self.center_crop(image=__magic_name__ , size=__magic_name__ ) for image in images]
if do_rescale:
lowerCAmelCase__ = [self.rescale(image=__magic_name__ , scale=__magic_name__ ) for image in images]
if do_normalize:
lowerCAmelCase__ = [self.normalize(image=__magic_name__ , mean=__magic_name__ , std=__magic_name__ ) for image in images]
lowerCAmelCase__ = [to_channel_dimension_format(__magic_name__ , __magic_name__ ) for image in images]
lowerCAmelCase__ = {"pixel_values": images}
return BatchFeature(data=__magic_name__ , tensor_type=__magic_name__ )
| 48 |
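The crop_pct branch in the resize() method above enlarges the resize target before the later center crop. A pure-arithmetic illustration of that rule (no assumptions beyond the code itself):

shortest_edge, crop_pct = 224, 0.9
scale_size = int(shortest_edge / crop_pct)  # resize so the short side becomes 248 ...
assert scale_size == 248                    # ... then center-crop back to 224x224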
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCAmelCase : Optional[Any] =logging.get_logger(__name__)
__lowerCAmelCase : str ={
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"
),
"distilbert-base-uncased-finetuned-sst-2-english": (
"https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"
),
}
class DistilBertConfig(PretrainedConfig):
    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }

    def __init__(
        self,
        vocab_size=30522,
        max_position_embeddings=512,
        sinusoidal_pos_embds=False,
        n_layers=6,
        n_heads=12,
        dim=768,
        hidden_dim=4 * 768,
        dropout=0.1,
        attention_dropout=0.1,
        activation="gelu",
        initializer_range=0.02,
        qa_dropout=0.1,
        seq_classif_dropout=0.2,
        pad_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)


class DistilBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 440 | 0 |
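A small sketch of how the attribute_map above is consumed (assumes the transformers library is installed): PretrainedConfig resolves the aliased names to the underlying DistilBERT field names.

from transformers import DistilBertConfig

config = DistilBertConfig(dim=768, n_heads=12, n_layers=6)
# The aliases in attribute_map read and write the mapped attributes:
assert config.hidden_size == config.dim == 768
assert config.num_attention_heads == config.n_heads == 12
assert config.num_hidden_layers == config.n_layers == 6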
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
'unc-nlp/lxmert-base-uncased': 'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json',
}
class LxmertConfig(PretrainedConfig):
    model_type = "lxmert"
    attribute_map = {}

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9500,
        num_object_labels=1600,
        num_attr_labels=400,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs)
| 710 |
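A minimal sketch showing the composite num_hidden_layers dict built in __init__ above (assumes transformers is installed; the values are the signature defaults):

from transformers import LxmertConfig

config = LxmertConfig(l_layers=9, x_layers=5, r_layers=5)
# Unlike most configs, LXMERT stores per-modality depths in one dict:
assert config.num_hidden_layers == {"vision": 5, "cross_encoder": 5, "language": 9}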
import logging
import os
from .state import PartialState
class MultiProcessAdapter(logging.LoggerAdapter):
    @staticmethod
    def _should_log(main_process_only):
        """Check whether the current process should emit the record."""
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        """Delegates to `logger.log` after deciding which processes may emit the record."""
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility."
            )
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)

        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()


def get_logger(name: str, log_level: str = None):
    """Returns a `MultiProcessAdapter`-wrapped logger, honoring the ACCELERATE_LOG_LEVEL env var."""
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
| 467 | 0 |
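Usage sketch for get_logger() above (assumes the accelerate package is installed and, as the RuntimeError demands, that a PartialState or Accelerator has been created first):

from accelerate import Accelerator
from accelerate.logging import get_logger

accelerator = Accelerator()                      # initializes the shared PartialState
logger = get_logger(__name__, log_level="INFO")
logger.info("emitted once, from the main process only")          # main_process_only defaults to True
logger.info("emitted from every process", main_process_only=False)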
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
'''b0''': {
'''hidden_dim''': 1280,
'''width_coef''': 1.0,
'''depth_coef''': 1.0,
'''image_size''': 224,
'''dropout_rate''': 0.2,
'''dw_padding''': [],
},
'''b1''': {
'''hidden_dim''': 1280,
'''width_coef''': 1.0,
'''depth_coef''': 1.1,
'''image_size''': 240,
'''dropout_rate''': 0.2,
'''dw_padding''': [16],
},
'''b2''': {
'''hidden_dim''': 1408,
'''width_coef''': 1.1,
'''depth_coef''': 1.2,
'''image_size''': 260,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 8, 16],
},
'''b3''': {
'''hidden_dim''': 1536,
'''width_coef''': 1.2,
'''depth_coef''': 1.4,
'''image_size''': 300,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 18],
},
'''b4''': {
'''hidden_dim''': 1792,
'''width_coef''': 1.4,
'''depth_coef''': 1.8,
'''image_size''': 380,
'''dropout_rate''': 0.4,
'''dw_padding''': [6],
},
'''b5''': {
'''hidden_dim''': 2048,
'''width_coef''': 1.6,
'''depth_coef''': 2.2,
'''image_size''': 456,
'''dropout_rate''': 0.4,
'''dw_padding''': [13, 27],
},
'''b6''': {
'''hidden_dim''': 2304,
'''width_coef''': 1.8,
'''depth_coef''': 2.6,
'''image_size''': 528,
'''dropout_rate''': 0.5,
'''dw_padding''': [31],
},
'''b7''': {
'''hidden_dim''': 2560,
'''width_coef''': 2.0,
'''depth_coef''': 3.1,
'''image_size''': 600,
'''dropout_rate''': 0.5,
'''dw_padding''': [18],
},
}
def get_efficientnet_config(model_name):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47853944, 0.4732864, 0.47434163],
        do_center_crop=False,
    )
    return preprocessor
def rename_keys(original_param_names):
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
rename_keys.append(('''stem_conv/kernel:0''', '''embeddings.convolution.weight''') )
rename_keys.append(('''stem_bn/gamma:0''', '''embeddings.batchnorm.weight''') )
rename_keys.append(('''stem_bn/beta:0''', '''embeddings.batchnorm.bias''') )
rename_keys.append(('''stem_bn/moving_mean:0''', '''embeddings.batchnorm.running_mean''') )
rename_keys.append(('''stem_bn/moving_variance:0''', '''embeddings.batchnorm.running_var''') )
for b in block_names:
        hf_b = block_name_mapping[b]
rename_keys.append((F'''block{b}_expand_conv/kernel:0''', F'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') )
rename_keys.append((F'''block{b}_expand_bn/gamma:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') )
rename_keys.append((F'''block{b}_expand_bn/beta:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') )
rename_keys.append(
(F'''block{b}_expand_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') )
rename_keys.append(
(F'''block{b}_expand_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') )
rename_keys.append(
(F'''block{b}_dwconv/depthwise_kernel:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') )
rename_keys.append((F'''block{b}_bn/gamma:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') )
rename_keys.append((F'''block{b}_bn/beta:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') )
rename_keys.append(
(F'''block{b}_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') )
rename_keys.append(
(F'''block{b}_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') )
rename_keys.append((F'''block{b}_se_reduce/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') )
rename_keys.append((F'''block{b}_se_reduce/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') )
rename_keys.append((F'''block{b}_se_expand/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') )
rename_keys.append((F'''block{b}_se_expand/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') )
rename_keys.append(
(F'''block{b}_project_conv/kernel:0''', F'''encoder.blocks.{hf_b}.projection.project_conv.weight''') )
rename_keys.append((F'''block{b}_project_bn/gamma:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.weight''') )
rename_keys.append((F'''block{b}_project_bn/beta:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.bias''') )
rename_keys.append(
(F'''block{b}_project_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') )
rename_keys.append(
(F'''block{b}_project_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') )
rename_keys.append(('''top_conv/kernel:0''', '''encoder.top_conv.weight''') )
rename_keys.append(('''top_bn/gamma:0''', '''encoder.top_bn.weight''') )
rename_keys.append(('''top_bn/beta:0''', '''encoder.top_bn.bias''') )
rename_keys.append(('''top_bn/moving_mean:0''', '''encoder.top_bn.running_mean''') )
rename_keys.append(('''top_bn/moving_variance:0''', '''encoder.top_bn.running_var''') )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]

    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue

        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    original_model = model_classes[model_name](
        include_top=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1000,
        classifier_activation="softmax",
    )

    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''b0''',
type=str,
help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''hf_model''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''')
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
    args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 184 |
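The kernel-layout change performed in replace_params() above can be checked in isolation. A minimal sketch with a dummy tensor (shapes are illustrative): TensorFlow stores conv kernels as (H, W, C_in, C_out), while PyTorch expects (C_out, C_in, H, W).

import numpy as np
import torch

tf_kernel = np.zeros((3, 3, 16, 32), dtype=np.float32)    # TF layout: (H, W, C_in, C_out)
pt_kernel = torch.from_numpy(tf_kernel).permute(3, 2, 0, 1)
assert tuple(pt_kernel.shape) == (32, 16, 3, 3)           # PyTorch layout: (C_out, C_in, H, W)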
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(
    model_type,
    generator_name_or_path: str,
    question_encoder_name_or_path: str,
    dest_dir: Path,
    config_name_or_path: str = None,
    generator_tokenizer_name_or_path: str = None,
    question_encoder_tokenizer_name_or_path: str = None,
):
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"

    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path

    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path

    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration

    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)

    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config

    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)

    # Sanity check.
    model_class.from_pretrained(dest_dir)

    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''',
choices=['''rag_sequence''', '''rag_token'''],
required=True,
type=str,
help='''RAG model type: rag_sequence, rag_token''',
)
parser.add_argument('''--dest''', type=str, required=True, help='''Path to the output checkpoint directory.''')
parser.add_argument('''--generator_name_or_path''', type=str, required=True, help='''Generator model identifier''')
parser.add_argument(
'''--question_encoder_name_or_path''', type=str, required=True, help='''Question encoder model identifier'''
)
parser.add_argument(
'''--generator_tokenizer_name_or_path''',
type=str,
help='''Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``''',
)
parser.add_argument(
'''--question_encoder_tokenizer_name_or_path''',
type=str,
help='''Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``''',
)
parser.add_argument(
'''--config_name_or_path''',
type=str,
help=(
'''Identifier of the model config to use, if not provided, resolves to a base config for a given'''
''' ``model_type``'''
),
)
    args = parser.parse_args()
    dest_dir = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
| 184 | 1 |
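Hypothetical programmatic invocation of consolidate() above; the generator and question-encoder checkpoints and the output path are illustrative, not taken from the source:

from pathlib import Path

consolidate(
    "rag_sequence",
    "facebook/bart-large",
    "facebook/dpr-question_encoder-single-nq-base",
    Path("./rag-sequence-checkpoint"),
)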
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase : List[str] = logging.get_logger(__name__)
_lowerCamelCase : Optional[Any] = {
"""sail/poolformer_s12""": """https://huggingface.co/sail/poolformer_s12/resolve/main/config.json""",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class PoolFormerConfig(PretrainedConfig):
    model_type = "poolformer"

    def __init__(
        self,
        num_channels=3,
        patch_size=16,
        stride=16,
        pool_size=3,
        mlp_ratio=4.0,
        depths=[2, 2, 6, 2],
        hidden_sizes=[64, 128, 320, 512],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        padding=[2, 1, 1, 1],
        num_encoder_blocks=4,
        drop_path_rate=0.0,
        hidden_act="gelu",
        use_layer_scale=True,
        layer_scale_init_value=1e-5,
        initializer_range=0.02,
        **kwargs,
    ):
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.stride = stride
        self.padding = padding
        self.pool_size = pool_size
        self.hidden_sizes = hidden_sizes
        self.mlp_ratio = mlp_ratio
        self.depths = depths
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.num_encoder_blocks = num_encoder_blocks
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.initializer_range = initializer_range
        super().__init__(**kwargs)


class PoolFormerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 2e-3
| 721 |
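Minimal instantiation sketch for the config above (assumes transformers is installed; the values checked are the signature defaults):

from transformers import PoolFormerConfig

config = PoolFormerConfig()
print(config.model_type)  # "poolformer" -- the string AutoConfig routes on
assert config.num_encoder_blocks == len(config.depths) == 4
assert config.hidden_sizes == [64, 128, 320, 512]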
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""")
_lowerCamelCase : Any = logging.getLogger(__name__)
@dataclass
class UpperCamelCase_ :
'''simple docstring'''
UpperCAmelCase__ = field(
default=128 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
UpperCAmelCase__ = field(
default=UpperCAmelCase__ , metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''} )
UpperCAmelCase__ = field(
default=UpperCAmelCase__ , metadata={
'''help''': (
'''Whether to pad all samples to `max_seq_length`. '''
'''If False, will pad the samples dynamically when batching to the maximum length in the batch.'''
)
} , )
UpperCAmelCase__ = field(
default=UpperCAmelCase__ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
UpperCAmelCase__ = field(
default=UpperCAmelCase__ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
} , )
UpperCAmelCase__ = field(
default=UpperCAmelCase__ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of prediction examples to this '''
'''value if set.'''
)
} , )
@dataclass
class UpperCamelCase_ :
'''simple docstring'''
UpperCAmelCase__ = field(
default=UpperCAmelCase__ , metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
UpperCAmelCase__ = field(
default=UpperCAmelCase__ , metadata={'''help''': '''Evaluation language. Also train language if `train_language` is set to None.'''} )
UpperCAmelCase__ = field(
default=UpperCAmelCase__ , metadata={'''help''': '''Train language if it is different from the evaluation language.'''} )
UpperCAmelCase__ = field(
default=UpperCAmelCase__ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
UpperCAmelCase__ = field(
default=UpperCAmelCase__ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
UpperCAmelCase__ = field(
default=UpperCAmelCase__ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
UpperCAmelCase__ = field(
default=UpperCAmelCase__ , metadata={'''help''': '''arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()'''} , )
UpperCAmelCase__ = field(
default=UpperCAmelCase__ , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , )
UpperCAmelCase__ = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
UpperCAmelCase__ = field(
default=UpperCAmelCase__ , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
UpperCAmelCase__ = field(
default=UpperCAmelCase__ , metadata={'''help''': '''Will enable to load a pretrained model whose head dimensions are different.'''} , )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_xnli", model_args, data_args)
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
if training_args.do_train:
if model_args.train_language is None:
A__ = load_dataset(
'''xnli''' , model_args.language , split='''train''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
A__ = load_dataset(
'''xnli''' , model_args.train_language , split='''train''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
A__ = train_dataset.features['''label'''].names
if training_args.do_eval:
A__ = load_dataset(
'''xnli''' , model_args.language , split='''validation''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
A__ = eval_dataset.features['''label'''].names
if training_args.do_predict:
A__ = load_dataset(
'''xnli''' , model_args.language , split='''test''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
A__ = predict_dataset.features['''label'''].names
# Labels
A__ = len(lowercase_ )
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
A__ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowercase_ , idalabel={str(lowercase_ ): label for i, label in enumerate(lowercase_ )} , labelaid={label: i for i, label in enumerate(lowercase_ )} , finetuning_task='''xnli''' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
A__ = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , do_lower_case=model_args.do_lower_case , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
A__ = AutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=lowercase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# Preprocessing the datasets
# Padding strategy
if data_args.pad_to_max_length:
A__ = '''max_length'''
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
A__ = False
def preprocess_function(lowercase_ ):
# Tokenize the texts
return tokenizer(
examples['''premise'''] , examples['''hypothesis'''] , padding=lowercase_ , max_length=data_args.max_seq_length , truncation=lowercase_ , )
if training_args.do_train:
if data_args.max_train_samples is not None:
A__ = min(len(lowercase_ ) , data_args.max_train_samples )
A__ = train_dataset.select(range(lowercase_ ) )
with training_args.main_process_first(desc='''train dataset map pre-processing''' ):
A__ = train_dataset.map(
lowercase_ , batched=lowercase_ , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on train dataset''' , )
# Log a few random samples from the training set:
for index in random.sample(range(len(lowercase_ ) ) , 3 ):
logger.info(f"""Sample {index} of the training set: {train_dataset[index]}.""" )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
A__ = min(len(lowercase_ ) , data_args.max_eval_samples )
A__ = eval_dataset.select(range(lowercase_ ) )
with training_args.main_process_first(desc='''validation dataset map pre-processing''' ):
A__ = eval_dataset.map(
lowercase_ , batched=lowercase_ , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on validation dataset''' , )
if training_args.do_predict:
if data_args.max_predict_samples is not None:
A__ = min(len(lowercase_ ) , data_args.max_predict_samples )
A__ = predict_dataset.select(range(lowercase_ ) )
with training_args.main_process_first(desc='''prediction dataset map pre-processing''' ):
A__ = predict_dataset.map(
lowercase_ , batched=lowercase_ , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on prediction dataset''' , )
# Get the metric function
A__ = evaluate.load('''xnli''' )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(lowercase_ ):
A__ = p.predictions[0] if isinstance(p.predictions , lowercase_ ) else p.predictions
A__ = np.argmax(lowercase_ , axis=1 )
return metric.compute(predictions=lowercase_ , references=p.label_ids )
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
A__ = default_data_collator
    elif training_args.fp16:
A__ = DataCollatorWithPadding(lowercase_ , pad_to_multiple_of=8 )
else:
A__ = None
# Initialize our Trainer
A__ = Trainer(
model=lowercase_ , args=lowercase_ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=lowercase_ , tokenizer=lowercase_ , data_collator=lowercase_ , )
# Training
if training_args.do_train:
A__ = None
if training_args.resume_from_checkpoint is not None:
A__ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
A__ = last_checkpoint
A__ = trainer.train(resume_from_checkpoint=lowercase_ )
A__ = train_result.metrics
A__ = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowercase_ )
)
A__ = min(lowercase_ , len(lowercase_ ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('''train''' , lowercase_ )
trainer.save_metrics('''train''' , lowercase_ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
A__ = trainer.evaluate(eval_dataset=lowercase_ )
A__ = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowercase_ )
A__ = min(lowercase_ , len(lowercase_ ) )
trainer.log_metrics('''eval''' , lowercase_ )
trainer.save_metrics('''eval''' , lowercase_ )
# Prediction
if training_args.do_predict:
logger.info('''*** Predict ***''' )
A__ , A__ , A__ = trainer.predict(lowercase_ , metric_key_prefix='''predict''' )
A__ = (
data_args.max_predict_samples if data_args.max_predict_samples is not None else len(lowercase_ )
)
A__ = min(lowercase_ , len(lowercase_ ) )
trainer.log_metrics('''predict''' , lowercase_ )
trainer.save_metrics('''predict''' , lowercase_ )
A__ = np.argmax(lowercase_ , axis=1 )
A__ = os.path.join(training_args.output_dir , '''predictions.txt''' )
if trainer.is_world_process_zero():
with open(lowercase_ , '''w''' ) as writer:
writer.write('''index\tprediction\n''' )
for index, item in enumerate(lowercase_ ):
A__ = label_list[item]
writer.write(f"""{index}\t{item}\n""" )
if __name__ == "__main__":
main()
| 177 | 0 |
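Illustrative launch of the script above; the checkpoint, languages, and output directory are example values, and the same flags are normally passed on the command line. A sketch, assuming the snippet is appended to this file so main() is in scope:

import sys

sys.argv = [
    "run_xnli.py",
    "--model_name_or_path", "bert-base-multilingual-cased",
    "--language", "de",
    "--train_language", "en",
    "--do_train",
    "--do_eval",
    "--output_dir", "/tmp/debug_xnli",
]
main()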
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
    # fairseq:
    "wmt19-ru-en": {"length_penalty": 1.1},
    "wmt19-en-ru": {"length_penalty": 1.15},
    "wmt19-en-de": {"length_penalty": 1.0},
    "wmt19-de-en": {"length_penalty": 1.1},
    # allenai:
    "wmt16-en-de-dist-12-1": {"length_penalty": 0.6},
    "wmt16-en-de-dist-6-1": {"length_penalty": 0.6},
    "wmt16-en-de-12-1": {"length_penalty": 0.8},
    "wmt19-de-en-6-6-base": {"length_penalty": 0.6},
    "wmt19-de-en-6-6-big": {"length_penalty": 0.6},
}
# this remaps the different models to their organization names
org_names = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    org_names[m] = "facebook"
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
    org_names[m] = "allenai"
def rewrite_dict_keys(d):
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
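# Worked example (hypothetical vocab): rewrite_dict_keys({"le@@": 5, "er": 7, "<unk>": 3})
# returns {"le": 5, "er</w>": 7, "<unk>": 3} -- the "@@" BPE continuation marker is
# stripped, word-final tokens gain "</w>", and the four special tokens keep their
# original spelling.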
def convert_fsmt_checkpoint_to_pytorch(fsmt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    assert os.path.exists(fsmt_checkpoint_path)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = basename(fsmt_checkpoint_path)
    fsmt_folder_path = dirname(fsmt_checkpoint_path)

    cls = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
    models = cls.hub_models()
    kwargs = {"bpe": "fastbpe", "tokenizer": "moses"}
    data_name_or_path = "."
    # note: since the model dump is old, fairseq has upgraded its model some
    # time later, and it does a whole lot of rewrites and splits on the saved
    # weights, therefore we can't use torch.load() directly on the model file.
    # see: upgrade_state_dict(state_dict) in fairseq_model.py
    print(f"using checkpoint {checkpoint_file}")
    chkpt = hub_utils.from_pretrained(
        fsmt_folder_path, checkpoint_file, data_name_or_path, archive_map=models, **kwargs
    )

    args = vars(chkpt["args"]["model"])

    src_lang = args["source_lang"]
    tgt_lang = args["target_lang"]

    data_root = dirname(pytorch_dump_folder_path)
    model_dir = basename(pytorch_dump_folder_path)

    # dicts
    src_dict_file = os.path.join(fsmt_folder_path, f"dict.{src_lang}.txt")
    tgt_dict_file = os.path.join(fsmt_folder_path, f"dict.{tgt_lang}.txt")

    src_dict = Dictionary.load(src_dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-src.json")
    print(f"Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=2))

    # detect whether this is a do_lower_case situation, which can be derived by checking whether we
    # have at least one uppercase letter in the source vocab
    do_lower_case = True
    for k in src_vocab.keys():
        if not k.islower():
            do_lower_case = False
            break

    tgt_dict = Dictionary.load(tgt_dict_file)
    tgt_vocab = rewrite_dict_keys(tgt_dict.indices)
    tgt_vocab_size = len(tgt_vocab)
    tgt_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-tgt.json")
    print(f"Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records")
    with open(tgt_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tgt_vocab, ensure_ascii=False, indent=2))

    # merges_file (bpecodes)
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    for fn in ["bpecodes", "code"]:  # older fairseq called the merges file "code"
        fsmt_merges_file = os.path.join(fsmt_folder_path, fn)
        if os.path.exists(fsmt_merges_file):
            break
    with open(fsmt_merges_file, encoding="utf-8") as fin:
        merges = fin.read()
    merges = re.sub(r" \d+$", "", merges, 0, re.M)  # remove frequency number
    print(f"Generating {merges_file}")
    with open(merges_file, "w", encoding="utf-8") as fout:
        fout.write(merges)

    # model config
    fsmt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")

    # validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
    # may have to modify the tokenizer if a different type is used by a future model
    assert args["bpe"] == "fastbpe", f"need to extend tokenizer to support bpe={args['bpe']}"
    assert args["tokenizer"] == "moses", f"need to extend tokenizer to support tokenizer={args['tokenizer']}"

    model_conf = {
        "architectures": ["FSMTForConditionalGeneration"],
        "model_type": "fsmt",
        "activation_dropout": args["activation_dropout"],
        "activation_function": "relu",
        "attention_dropout": args["attention_dropout"],
        "d_model": args["decoder_embed_dim"],
        "dropout": args["dropout"],
        "init_std": 0.02,
        "max_position_embeddings": args["max_source_positions"],
        "num_hidden_layers": args["encoder_layers"],
        "src_vocab_size": src_vocab_size,
        "tgt_vocab_size": tgt_vocab_size,
        "langs": [src_lang, tgt_lang],
        "encoder_attention_heads": args["encoder_attention_heads"],
        "encoder_ffn_dim": args["encoder_ffn_embed_dim"],
        "encoder_layerdrop": args["encoder_layerdrop"],
        "encoder_layers": args["encoder_layers"],
        "decoder_attention_heads": args["decoder_attention_heads"],
        "decoder_ffn_dim": args["decoder_ffn_embed_dim"],
        "decoder_layerdrop": args["decoder_layerdrop"],
        "decoder_layers": args["decoder_layers"],
        "bos_token_id": 0,
        "pad_token_id": 1,
        "eos_token_id": 2,
        "is_encoder_decoder": True,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_all_embeddings"],
    }

    # good hparam defaults to start with
    model_conf["num_beams"] = 5
    model_conf["early_stopping"] = False
    if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
        model_conf["length_penalty"] = best_score_hparams[model_dir]["length_penalty"]
    else:
        model_conf["length_penalty"] = 1.0

    print(f"Generating {fsmt_model_config_file}")
    with open(fsmt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=2))

    # tokenizer config
    fsmt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, "tokenizer_config.json")

    tokenizer_conf = {
        "langs": [src_lang, tgt_lang],
        "model_max_length": 1024,
        "do_lower_case": do_lower_case,
    }

    print(f"Generating {fsmt_tokenizer_config_file}")
    with open(fsmt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=2))

    # model
    model = chkpt["models"][0]
    model_state_dict = model.state_dict()

    # rename keys to start with 'model.'
    model_state_dict = OrderedDict(("model." + k, v) for k, v in model_state_dict.items())

    # remove unneeded keys
    ignore_keys = [
        "model.model",
        "model.encoder.version",
        "model.decoder.version",
        "model.encoder_embed_tokens.weight",
        "model.decoder_embed_tokens.weight",
        "model.encoder.embed_positions._float_tensor",
        "model.decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    config = FSMTConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = FSMTForConditionalGeneration(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict, strict=False)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, "pytorch_model.bin")
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
    print("\nLast step is to upload the files to s3")
    print(f"cd {data_root}")
    print(f"transformers-cli upload {model_dir}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--fsmt_checkpoint_path""",
default=None,
type=str,
required=True,
help=(
"""Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"""
""" bpecodes, etc."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
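
# A hedged sketch (not part of the original script) of invoking the converter
# above directly from Python. The paths below are placeholders for a fairseq
# dump directory that also contains the dicts and bpecodes files; the guard
# keeps this a no-op when no such checkpoint is present.
fsmt_checkpoint = "checkpoints/wmt19.ru-en/model4.pt"  # hypothetical path
dump_dir = "converted/wmt19-ru-en"                     # hypothetical path

if os.path.exists(fsmt_checkpoint):
    convert_fsmt_checkpoint_to_pytorch(fsmt_checkpoint, dump_dir)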
| 12 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


_import_structure = {
    "configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_longt5"] = [
        "LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongT5EncoderModel",
        "LongT5ForConditionalGeneration",
        "LongT5Model",
        "LongT5PreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_longt5"] = [
        "FlaxLongT5ForConditionalGeneration",
        "FlaxLongT5Model",
        "FlaxLongT5PreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
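
# Minimal sketch (my own example, assuming an environment with transformers
# installed) of what the lazy module above enables: attributes are resolved on
# first access via _LazyModule.__getattr__ instead of at package import time.
from transformers.models.longt5 import LongT5Config

config = LongT5Config()        # default hyperparameters, no download needed
print(config.model_type)       # -> "longt5"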
| 683 | 0 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
logger = logging.get_logger(__name__)

BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bigscience/bloom": "https://huggingface.co/bigscience/bloom/resolve/main/config.json",
    "bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/config.json",
    "bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json",
    "bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json",
    "bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/config.json",
    "bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json",
}


class BloomConfig(PretrainedConfig):
    model_type = "bloom"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_hidden_layers": "n_layer",
        "num_attention_heads": "n_head",
    }

    def __init__(
        self,
        vocab_size=250880,
        hidden_size=64,
        n_layer=2,
        n_head=8,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=1,
        eos_token_id=2,
        apply_residual_connection_post_layernorm=False,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        pretraining_tp=1,
        slow_but_exact=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.n_layer = n_layer
        self.n_head = n_head
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.pretraining_tp = pretraining_tp
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.slow_but_exact = slow_but_exact

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
class BloomOnnxConfig(OnnxConfigWithPast):
    torch_onnx_minimum_version = version.parse("1.12")

    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
            self.fill_with_past_key_values_(common_inputs, direction="inputs", inverted_values_shape=True)
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    @property
    def atol_for_validation(self) -> float:
        return 1e-3

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizer",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                head_dim = self._config.hidden_size // self.num_attention_heads
                past_key_shape = (
                    batch * self.num_attention_heads,
                    head_dim,
                    past_key_values_length,
                )
                past_value_shape = (
                    batch * self.num_attention_heads,
                    past_key_values_length,
                    head_dim,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_key_shape), torch.zeros(past_value_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
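
# Hedged usage sketch (my own example) of the ONNX config above: build dummy
# inputs for a tiny Bloom config. Assumes transformers and torch are installed
# and that the public "bigscience/bloom-560m" tokenizer can be downloaded.
from transformers import AutoTokenizer, TensorType

tiny_config = BloomConfig(n_layer=2, n_head=4, hidden_size=32)
onnx_config = BloomOnnxConfig(tiny_config, use_past=False)
bloom_tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-560m")
dummy = onnx_config.generate_dummy_inputs(
    bloom_tokenizer, batch_size=2, seq_length=8, framework=TensorType.PYTORCH
)
print(sorted(dummy.keys()))  # ['attention_mask', 'input_ids']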
| 462 |
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module):
    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super().__init__()

        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        """
        Find scores of each token being start and end token for an entity.
        """
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
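
# Hedged sketch (my own example) of the tensor plumbing the forward() above
# expects. Real FSNER wraps entity spans in supports with [E] ... [/E]
# markers; the sentences and ids here are illustrative, and add_tokens plus
# resize_token_embeddings are defensive in case the checkpoint's tokenizer
# does not already know the markers.
from transformers import AutoTokenizer

fsner_tokenizer = AutoTokenizer.from_pretrained("sayef/fsner-bert-base-uncased")
fsner_tokenizer.add_tokens(["[E]", "[/E]"])  # harmless if already present

fsner_model = FSNERModel()
fsner_model.bert.resize_token_embeddings(len(fsner_tokenizer))

query = fsner_tokenizer(["who arrived in paris?"], padding=True, return_tensors="pt")
supports = fsner_tokenizer(
    ["[E] John [/E] arrived in Berlin .", "[E] Mary [/E] arrived in Rome ."],
    padding=True,
    return_tensors="pt",
)
supports["sizes"] = torch.tensor([2])  # both support sentences belong to the one query
supports["start_token_id"] = torch.tensor(fsner_tokenizer.convert_tokens_to_ids("[E]"))
supports["end_token_id"] = torch.tensor(fsner_tokenizer.convert_tokens_to_ids("[/E]"))

with torch.no_grad():
    p_starts, p_ends = fsner_model(query, supports)  # per-token start/end probabilities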
| 462 | 1 |
from __future__ import annotations
def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(
    nums: list[int],
    max_sum: int,
    num_index: int,
    path: list[int],
    result: list[list[int]],
    remaining_nums_sum: int,
) -> None:
    """
    Creates a state space tree to iterate through each branch using DFS,
    backtracking when a node cannot be branched further. A branch is cut when
    the partial sum already exceeds max_sum, or when even taking every
    remaining number could not reach max_sum.
    """
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums,
            max_sum,
            index + 1,
            [*path, nums[index]],
            result,
            remaining_nums_sum - nums[index],
        )


nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
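
# One extra check of my own (not from the original file): the two subsets of
# [3, 34, 4, 12, 5, 2] summing to 9 should come out in depth-first order.
assert generate_sum_of_subsets_soln([3, 34, 4, 12, 5, 2], 9) == [[3, 4, 2], [4, 5]]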
| 37 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        patch_size=2,
        max_length=24,
        num_mel_bins=16,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        frequency_stride=2,
        time_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride

        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, input_values, labels

    def get_config(self):
        return ASTConfig(
            patch_size=self.patch_size,
            max_length=self.max_length,
            num_mel_bins=self.num_mel_bins,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            frequency_stride=self.frequency_stride,
            time_stride=self.time_stride,
        )

    def create_and_check_model(self, config, input_values, labels):
        model = ASTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {"input_values": input_values}
        return config, inputs_dict
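
# A quick illustration (my own arithmetic, matching the tester defaults above)
# of the sequence-length computation: 16 mel bins x 24 frames with patch size 2
# and strides 2 give an 8 x 12 patch grid, plus [CLS] and distillation tokens.
frequency_out_dimension = (16 - 2) // 2 + 1  # 8
time_out_dimension = (24 - 2) // 2 + 1       # 12
assert frequency_out_dimension * time_out_dimension + 2 == 98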
@require_torch
class ASTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as AST does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_headmasking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True

        return False

    def setUp(self):
        self.model_tester = ASTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ASTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="AST does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["input_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on some audio from AudioSet
def prepare_audio():
    filepath = hf_hub_download(
        repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset"
    )

    audio, sampling_rate = torchaudio.load(filepath)

    return audio, sampling_rate


@require_torch
@require_torchaudio
class ASTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_feature_extractor(self):
        return (
            ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
            if is_torchaudio_available()
            else None
        )

    @slow
    def test_inference_audio_classification(self):
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593").to(torch_device)

        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio, sampling_rate=sampling_rate, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 527))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 37 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ : Dict = logging.get_logger(__name__)
lowerCamelCase__ : Dict = {
"facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json",
"facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json",
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class lowercase__( __a ):
'''simple docstring'''
UpperCamelCase = """xlm-roberta-xl"""
def __init__( self :List[Any] , lowerCamelCase_ :Optional[Any]=25_08_80 , lowerCamelCase_ :List[Any]=25_60 , lowerCamelCase_ :List[str]=36 , lowerCamelCase_ :Optional[Any]=32 , lowerCamelCase_ :int=1_02_40 , lowerCamelCase_ :Union[str, Any]="gelu" , lowerCamelCase_ :Optional[int]=0.1 , lowerCamelCase_ :Optional[Any]=0.1 , lowerCamelCase_ :str=5_14 , lowerCamelCase_ :Dict=1 , lowerCamelCase_ :str=0.0_2 , lowerCamelCase_ :int=1E-05 , lowerCamelCase_ :Dict=1 , lowerCamelCase_ :Optional[Any]=0 , lowerCamelCase_ :Optional[Any]=2 , lowerCamelCase_ :int="absolute" , lowerCamelCase_ :Dict=True , lowerCamelCase_ :Optional[int]=None , **lowerCamelCase_ :Tuple , ) -> List[Any]:
'''simple docstring'''
super().__init__(pad_token_id=A__ , bos_token_id=A__ , eos_token_id=A__ , **A__ )
SCREAMING_SNAKE_CASE : List[Any] = vocab_size
SCREAMING_SNAKE_CASE : Any = hidden_size
SCREAMING_SNAKE_CASE : Optional[int] = num_hidden_layers
SCREAMING_SNAKE_CASE : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_act
SCREAMING_SNAKE_CASE : int = intermediate_size
SCREAMING_SNAKE_CASE : List[str] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Any = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : int = max_position_embeddings
SCREAMING_SNAKE_CASE : str = type_vocab_size
SCREAMING_SNAKE_CASE : List[Any] = initializer_range
SCREAMING_SNAKE_CASE : Any = layer_norm_eps
SCREAMING_SNAKE_CASE : int = position_embedding_type
SCREAMING_SNAKE_CASE : Dict = use_cache
SCREAMING_SNAKE_CASE : Any = classifier_dropout
class lowercase__( __a ):
'''simple docstring'''
@property
def __lowerCAmelCase ( self :int ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE : List[Any] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
SCREAMING_SNAKE_CASE : Any = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
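
# Minimal sanity check (my own example) of the defaults above; this runs
# locally against the classes just defined, nothing is downloaded.
config = XLMRobertaXLConfig()
assert config.hidden_size == 2560
assert config.num_hidden_layers == 36
assert XLMRobertaXLOnnxConfig(config, task="multiple-choice").inputs["input_ids"][1] == "choice"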
| 721 |
"""simple docstring"""
from __future__ import annotations
from numpy import array, cos, cross, floataa, radians, sin
from numpy.typing import NDArray
def __A ( a_ : float , a_ : float , a_ : bool = False )-> list[float]:
'''simple docstring'''
if radian_mode:
return [magnitude * cos(a_ ), magnitude * sin(a_ )]
return [magnitude * cos(radians(a_ ) ), magnitude * sin(radians(a_ ) )]
def __A ( a_ : NDArray[floataa] , a_ : NDArray[floataa] , a_ : float = 10**-1 )-> bool:
'''simple docstring'''
SCREAMING_SNAKE_CASE : NDArray[floataa] = cross(a_ , a_ )
SCREAMING_SNAKE_CASE : float = sum(a_ )
return abs(a_ ) < eps
if __name__ == "__main__":
# Test to check if it works
lowerCamelCase__ : Optional[Any] = array(
[
polar_force(7_1_8.4, 180 - 30),
polar_force(8_7_9.5_4, 45),
polar_force(100, -90),
]
)
lowerCamelCase__ : NDArray[floataa] = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
lowerCamelCase__ : Union[str, Any] = array(
[
polar_force(30 * 9.8_1, 15),
polar_force(215, 180 - 45),
polar_force(264, 90 - 30),
]
)
lowerCamelCase__ : Any = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
lowerCamelCase__ : Union[str, Any] = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
lowerCamelCase__ : Optional[int] = array([[0, 0], [6, 0], [10, 0], [12, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
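
    # One extra check of my own (not one of the original problems): a single
    # force applied away from the pivot produces a net moment, so the system
    # should *not* be reported as in equilibrium.
    forces = array([polar_force(100, 90)])  # 100 N straight up
    location = array([[1, 0]])              # applied 1 m from the pivot
    assert not in_static_equilibrium(forces, location)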
| 18 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}


class RoCBertConfig(PretrainedConfig):
    model_type = "roc_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24858,
        concat_input=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
| 37 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/informer-tourism-monthly": (
        "https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}


class InformerConfig(PretrainedConfig):
    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        # Informer arguments
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]

        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
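
# Sketch with inputs of my own choosing: the feature-count property above sums
# embedding dims, dynamic/static real features, time features, and the two
# scaling features (log1p(abs(loc)) and log(scale)) per input dimension.
config = InformerConfig(
    prediction_length=12,
    num_static_categorical_features=1,
    cardinality=[10],
    embedding_dimension=[3],
    num_time_features=2,
)
assert config._number_of_features == 3 + 0 + 2 + 0 + 1 * 2  # == 7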
| 79 | 0 |
def decimal_isolate(number: float, digit_amount: int) -> float:
    """
    Isolates the decimal part of a number.
    If digit_amount > 0, round to that decimal place, else return the entire decimal part.
    """
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
| 704 |
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec("s3fs") is not None

if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401

COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(f'A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.')
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri(dataset_path: str) -> str:
    """Strip the remote filesystem prefix (e.g. ``s3://``) from ``dataset_path``."""
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    """Check whether ``fs`` is a remote (non-local) filesystem."""
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str) -> None:
    """Rename ``src`` to ``dst`` on ``fs``."""
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock() -> None:
    """
    Clear fsspec's references to its event loop and thread. This is necessary,
    otherwise HTTPFileSystem can hang inside a training loop.
    """
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
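

# Quick, network-free checks of my own for the helpers above; the in-memory
# filesystem ships with fsspec, so no extra dependency is needed.
assert extract_path_from_uri("s3://my-bucket/datasets/train") == "my-bucket/datasets/train"
assert extract_path_from_uri("/local/path/datasets/train") == "/local/path/datasets/train"
assert is_remote_filesystem(fsspec.filesystem("memory"))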
| 142 | 0 |
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results


stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


@require_torch_tpu
class TorchXLAExamplesTests(TestCasePlus):
    def test_run_glue(self):
        import xla_spawn

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            ./examples/pytorch/text-classification/run_glue.py
            --num_cores=8
            ./examples/pytorch/text-classification/run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --do_train
            --do_eval
            --debug tpu_metrics_debug
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --max_steps=10
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
            """.split()

        with patch.object(sys, "argv", testargs):
            start = time()
            xla_spawn.main()
            end = time()

            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)

            # Assert that the script takes less than 500 seconds to make sure it doesn't hang.
            self.assertLess(end - start, 500)

    def test_trainer_tpu(self):
        import xla_spawn

        testargs = """
            ./tests/test_trainer_tpu.py
            --num_cores=8
            ./tests/test_trainer_tpu.py
            """.split()
        with patch.object(sys, "argv", testargs):
            xla_spawn.main()
| 214 |
import fire
from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer


def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    """Save a randomly initialized version of a model using a pretrained config,
    together with the matching tokenizer; ``config_kwargs`` are passed to AutoConfig."""
    cfg = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(cfg)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model


if __name__ == "__main__":
    fire.Fire(save_randomly_initialized_version)
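
# Hedged example of my own: build a tiny randomly initialized T5 for smoke
# tests. "t5-small" is a real public config name; the kwargs shrink the model
# so nothing heavy is instantiated. Network access is needed for the config
# and tokenizer downloads.
tiny_model = save_randomly_initialized_version(
    "t5-small", "tiny-t5", d_model=16, num_layers=2, num_heads=2, d_ff=32, d_kv=8
)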
| 214 | 1 |
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByT5Tokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"


class ByT5TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ByT5Tokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = ByT5Tokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def t5_base_tokenizer(self):
        return ByT5Tokenizer.from_pretrained("google/byt5-small")

    def get_tokenizer(self, **kwargs) -> ByT5Tokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for ByT5 because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids

    def test_eos_treatment(self):
        tokenizer = self.t5_base_tokenizer
        batch_with_eos_added = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"])
        batch_without_eos_added = tokenizer(["hi", "I went to the gym", ""])
        self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"])

    def test_multibytes_char(self):
        tokenizer = self.t5_base_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "Unicode €.</s>")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "e è é ê ë</s>")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "e è é ê ë</s>")

    def test_prepare_batch_integration(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 37), batch.input_ids.shape)
        self.assertEqual((2, 37), batch.attention_mask.shape)

    def test_empty_target_text(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)

    def test_max_length_integration(self):
        tokenizer = self.t5_base_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])

    def test_eos_in_input(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization. </s>"]
        tgt_text = ["Summary of the text. </s>"]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
        expected_tgt_tokens = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
        # fmt: on

        batch = tokenizer(src_text, text_target=tgt_text)

        self.assertEqual(expected_src_tokens, batch["input_ids"][0])
        self.assertEqual(expected_tgt_tokens, batch["labels"][0])

    # cannot use the default save_and_load_tokenizer test method because the tokenizer has no vocab
    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)

    # There is a conflict between the default value of extra_ids and adding a new special token
    # through additional_special_tokens; we need to add the extra_ids to that list.
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir,
                    additional_special_tokens=new_added_tokens,
                )

                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )

    def test_decode_single_bytes(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                tokenizer = tokenizer_class.from_pretrained(tmp_dir)

                self.assertTrue(tokenizer.decode([255]) == "")

    # tokenizer can be instantiated without any pretrained files, so no need for pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have a vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on the whole input string
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist, so unnecessary to test
    def test_conversion_reversible(self):
        pass

    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character strings
        # and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"]
                string = tokenizer.convert_tokens_to_string(tokens)

                self.assertIsInstance(string, str)

    # We need a different implementation of the test of the same name defined in TokenizerTesterMixin
    # because this tokenizer doesn't have a vocab
    def test_tokenizers_common_ids_setters(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                attributes_list = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]

                token_id_to_test_setters = 0
                token_to_test_setters = tokenizer.convert_ids_to_tokens(
                    token_id_to_test_setters, skip_special_tokens=False
                )

                for attr in attributes_list:
                    setattr(tokenizer, attr + "_id", None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), None)

                    setattr(tokenizer, attr + "_id", token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters)

                setattr(tokenizer, "additional_special_tokens_ids", [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [])

                setattr(tokenizer, "additional_special_tokens_ids", [token_id_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [token_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [token_id_to_test_setters])
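

# Byte-level sanity check of my own: ByT5 ids are the raw utf-8 bytes offset
# by 3 special-token slots, with id 1 appended for </s>. This matches the
# expected ids in the tests above ('U' is 85, encoded as 88).
byte_tok = ByT5Tokenizer()
assert byte_tok("hi")["input_ids"] == [ord("h") + 3, ord("i") + 3, 1]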
| 125 |
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(10)
        assert torch.equal(
            scheduler.timesteps,
            torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]
            ),
        )

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps)

    def test_pow_of_3_inference_steps(self):
        # earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
        num_inference_steps = 27

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(num_inference_steps)

            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2]):
                sample = scheduler.step_prk(residual, t, sample).prev_sample

    def test_inference_plms_no_past_residuals(self):
        with self.assertRaises(ValueError):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 198.1318) < 1e-2
        assert abs(result_mean.item() - 0.2580) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 67.3986) < 1e-2
        assert abs(result_mean.item() - 0.0878) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 230.0399) < 1e-2
        assert abs(result_mean.item() - 0.2995) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 186.9482) < 1e-2
        assert abs(result_mean.item() - 0.2434) < 1e-3
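

# Illustrative usage sketch (an addition for clarity, not part of the original
# test file): the two-phase sampling that full_loop() exercises above. PNDM warms
# up with Runge-Kutta (PRK) steps, then switches to cheaper linear multistep
# (PLMS) updates that reuse the residuals cached on `scheduler.ets`. The toy
# `model` below is a stand-in assumption for a trained noise-prediction network.
def pndm_sampling_sketch():
    scheduler = PNDMScheduler(num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02, beta_schedule="linear")
    scheduler.set_timesteps(10)

    sample = torch.randn(1, 3, 8, 8)  # stand-in for a noisy sample
    model = lambda x, t: 0.1 * x  # stand-in for a trained model

    for t in scheduler.prk_timesteps:
        residual = model(sample, t)
        sample = scheduler.step_prk(residual, t, sample).prev_sample
    for t in scheduler.plms_timesteps:
        residual = model(sample, t)
        sample = scheduler.step_plms(residual, t, sample).prev_sample
    return sample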
| 125 | 1 |
"""simple docstring"""
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        image_size=32,
        patch_size=16,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        backbone_out_indices=[0, 1, 2, 3],
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        backbone_featmap_shape=[1, 384, 24, 24],
        is_hybrid=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [96, 192, 384, 768],
            "num_groups": 2,
        }

        return DPTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            backbone_out_indices=self.backbone_out_indices,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            is_hybrid=self.is_hybrid,
            backbone_config=backbone_config,
            backbone_featmap_shape=self.backbone_featmap_shape,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = DPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_depth_estimation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "depth-estimation": DPTForDepthEstimation,
            "feature-extraction": DPTModel,
            "image-segmentation": DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DPT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_depth_estimation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    def test_training(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.use_cache = False
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)

            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            model = DPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_raise_readout_type(self):
        # We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = "add"
        with self.assertRaises(ValueError):
            _ = DPTForDepthEstimation(config)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
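

# Illustrative sketch (an assumption about typical downstream usage, not part of
# the original test file): DPT's raw predicted depth map is usually resized back
# to the input image's resolution before visualization.
def resize_depth_to_image(predicted_depth, image):
    # predicted_depth: (batch, height, width); PIL's image.size is (width, height)
    prediction = torch.nn.functional.interpolate(
        predicted_depth.unsqueeze(1), size=image.size[::-1], mode="bicubic", align_corners=False
    )
    return prediction.squeeze(1)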
@require_torch
@require_vision
@slow
class DPTModelIntegrationTest(unittest.TestCase):
    def test_inference_depth_estimation(self):
        image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
        model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to(torch_device)

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            predicted_depth = outputs.predicted_depth

        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384))
        self.assertEqual(predicted_depth.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100, expected_slice, atol=1e-4))
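

# Worked example (added for clarity): with image_size=32 and patch_size=16 as in
# DPTModelTester above, the encoder sequence is (32 // 16) ** 2 = 4 patches plus
# one [CLS] token, i.e. seq_length = 5.
assert (32 // 16) ** 2 + 1 == 5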
| 644 | """simple docstring"""
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False
if is_vision_available():
from PIL import Image
    from transformers import Pix2StructImageProcessor
class Pix2StructImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_normalize=True,
        do_convert_rgb=True,
        patch_size=None,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}

    def prepare_image_processor_dict(self):
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image(self):
        img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
        return raw_image


@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class Pix2StructImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_expected_patches(self):
        dummy_image = self.image_processor_tester.prepare_dummy_image()

        image_processor = self.image_processing_class(**self.image_processor_dict)
        max_patch = 2048

        inputs = image_processor(dummy_image, return_tensors="pt", max_patches=max_patch)
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606), atol=1e-3, rtol=1e-3))

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_vqa(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        image_processor.is_vqa = True

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            with self.assertRaises(ValueError):
                encoded_images = image_processor(
                    image_inputs[0], return_tensors="pt", max_patches=max_patch
                ).flattened_patches

            dummy_text = "Hello"

            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )


@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class Pix2StructImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self, num_channels=4)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
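

# Worked example (added for clarity): why expected_hidden_dim above equals
# patch_height * patch_width * num_channels + 2. Each flattened patch stores its
# raw pixel values plus two extra slots for the patch's row and column indices.
assert 16 * 16 * 3 + 2 == 770  # the default 3-channel case
assert 16 * 16 * (4 - 1) + 2 == 770  # 4-channel inputs are converted to RGB first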
| 644 | 1 |
import random
from typing import Any
def fisher_yates_shuffle(data: list) -> list[Any]:
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data
if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
print('Fisher-Yates Shuffle:')
print('List', integers, strings)
print('FY Shuffle', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
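

# Note (added for clarity): the loop above performs len(data) random
# transpositions, which is simpler than the classical Fisher-Yates procedure and
# does not sample all permutations uniformly. A minimal sketch of the unbiased
# modern (Durstenfeld) variant:
def fisher_yates_shuffle_unbiased(data: list) -> list:
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)  # pick from the not-yet-fixed prefix, inclusive
        data[i], data[j] = data[j], data[i]
    return data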
| 371 |
| 371 | 1 |
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class FlaubertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=12,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        summary_type="last",
        use_proj=None,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        return FlaubertConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
        )

    def create_and_check_flaubert_model(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_simple_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )

        (total_loss,) = result_with_labels.to_tuple()

        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)

        (total_loss,) = result_with_labels.to_tuple()

        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_flaubert_sequence_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_token_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_labels = self.num_labels
        model = FlaubertForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_choices = self.num_choices
        model = FlaubertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict


@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaubertModel,
            FlaubertWithLMHeadModel,
            FlaubertForQuestionAnswering,
            FlaubertForQuestionAnsweringSimple,
            FlaubertForSequenceClassification,
            FlaubertForTokenClassification,
            FlaubertForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": FlaubertModel,
            "fill-mask": FlaubertWithLMHeadModel,
            "question-answering": FlaubertForQuestionAnsweringSimple,
            "text-classification": FlaubertForSequenceClassification,
            "token-classification": FlaubertForTokenClassification,
            "zero-shot": FlaubertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def setUp(self):
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))


@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]]
        )

        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
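

# Worked example (added for clarity, with illustrative numbers): the beam-search
# QA head checked above keeps the top start_n_top start positions and, for each,
# the top end_n_top end positions, so the flattened end scores carry
# start_n_top * end_n_top entries per example. Assuming the common default
# start_n_top = end_n_top = 5 and batch_size = 13:
batch_size, start_n_top, end_n_top = 13, 5, 5
assert (batch_size, start_n_top) == (13, 5)  # start_top_log_probs shape
assert (batch_size, start_n_top * end_n_top) == (13, 25)  # end_top_log_probs shape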
| 81 |
from bisect import bisect
from itertools import accumulate
def frac_knapsack(vl, wt, w, n):
    """simple docstring"""
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
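
    # Worked example (added for clarity): values [60, 100, 120], weights
    # [10, 20, 30], capacity 50. Sorted by value/weight ratio, the first two items
    # fit whole (60 + 100) and 20/30 of the last item adds 80, giving 240.0.
    print(frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3))  # 240.0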
| 193 | 0 |
def check_bouncy(n: int) -> bool:
    """simple docstring"""
    if not isinstance(n, int):
        raise ValueError("check_bouncy() accepts only integer arguments")
    str_n = str(n)
    sorted_str_n = "".join(sorted(str_n))
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n


def solution(percent: float = 99) -> int:
    """simple docstring"""
    if not 0 < percent < 100:
        raise ValueError("solution() only accepts values from 0 to 100")
    bouncy_num = 0
    num = 1

    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
    print(f"{solution(99)}")
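
    # Worked examples (added for clarity): 538 is bouncy (5 > 3 but 3 < 8), while
    # 134468 is monotonically increasing and 66420 monotonically decreasing, so
    # neither of those is bouncy.
    assert check_bouncy(538) is True
    assert check_bouncy(134468) is False
    assert check_bouncy(66420) is False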
| 409 |
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class lowercase__( UpperCAmelCase ):
"""simple docstring"""
def __init__( self : str , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[Any]=1_3 , SCREAMING_SNAKE_CASE_ : Any=7 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=True , SCREAMING_SNAKE_CASE_ : str=True , SCREAMING_SNAKE_CASE_ : str=False , SCREAMING_SNAKE_CASE_ : Tuple=True , SCREAMING_SNAKE_CASE_ : Union[str, Any]=9_9 , SCREAMING_SNAKE_CASE_ : int=3_2 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=5 , SCREAMING_SNAKE_CASE_ : Optional[Any]=4 , SCREAMING_SNAKE_CASE_ : int=3_7 , SCREAMING_SNAKE_CASE_ : Tuple="gelu" , SCREAMING_SNAKE_CASE_ : Dict=0.1 , SCREAMING_SNAKE_CASE_ : Optional[Any]=0.1 , SCREAMING_SNAKE_CASE_ : Optional[Any]=5_1_2 , SCREAMING_SNAKE_CASE_ : str=1_6 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=2 , SCREAMING_SNAKE_CASE_ : Optional[Any]=0.02 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=3 , SCREAMING_SNAKE_CASE_ : Optional[int]=4 , SCREAMING_SNAKE_CASE_ : Tuple=None , ) -> Optional[Any]:
lowercase_ = parent
lowercase_ = batch_size
lowercase_ = seq_length
lowercase_ = is_training
lowercase_ = use_input_mask
lowercase_ = use_token_type_ids
lowercase_ = use_labels
lowercase_ = vocab_size
lowercase_ = hidden_size
lowercase_ = num_hidden_layers
lowercase_ = num_attention_heads
lowercase_ = intermediate_size
lowercase_ = hidden_act
lowercase_ = hidden_dropout_prob
lowercase_ = attention_probs_dropout_prob
lowercase_ = max_position_embeddings
lowercase_ = type_vocab_size
lowercase_ = type_sequence_label_size
lowercase_ = initializer_range
lowercase_ = num_labels
lowercase_ = num_choices
lowercase_ = scope
def _lowercase ( self : Union[str, Any] ) -> Optional[int]:
lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase_ = None
if self.use_input_mask:
lowercase_ = random_attention_mask([self.batch_size, self.seq_length] )
lowercase_ = None
lowercase_ = None
lowercase_ = None
if self.use_labels:
lowercase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase_ = ids_tensor([self.batch_size] , self.num_choices )
lowercase_ = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowercase ( self : str ) -> Tuple:
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def _lowercase ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Tuple ) -> Optional[int]:
lowercase_ = DistilBertModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowercase_ = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self : int , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Optional[Any]:
lowercase_ = DistilBertForMaskedLM(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowercase_ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase ( self : int , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> List[str]:
lowercase_ = DistilBertForQuestionAnswering(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowercase_ = model(
SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , start_positions=SCREAMING_SNAKE_CASE_ , end_positions=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowercase ( self : str , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Any:
lowercase_ = self.num_labels
lowercase_ = DistilBertForSequenceClassification(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowercase_ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowercase ( self : str , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Any ) -> List[str]:
lowercase_ = self.num_labels
lowercase_ = DistilBertForTokenClassification(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowercase_ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Any ) -> Dict:
lowercase_ = self.num_choices
lowercase_ = DistilBertForMultipleChoice(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowercase_ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase_ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase_ = model(
SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowercase ( self : Dict ) -> Optional[int]:
lowercase_ = self.prepare_config_and_inputs()
((lowercase_) , (lowercase_) , (lowercase_) , (lowercase_) , (lowercase_) , (lowercase_)) = config_and_inputs
lowercase_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class DistilBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DistilBertModel,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DistilBertModel,
            "fill-mask": DistilBertForMaskedLM,
            "question-answering": DistilBertForQuestionAnswering,
            "text-classification": DistilBertForSequenceClassification,
            "token-classification": DistilBertForTokenClassification,
            "zero-shot": DistilBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = True
    test_resize_embeddings = True
    test_resize_position_embeddings = True

    def setUp(self):
        self.model_tester = DistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # BertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class DistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = DistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]]
        )

        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 409 | 1 |
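# Gnome sort: scan forward while adjacent items are in order; on an inversion,
# swap the pair and step back one position to re-check the previous pair.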
def gnome_sort(lst: list) -> list:
    """Sort a list in place, in ascending order, using gnome sort."""
    if len(lst) <= 1:
        return lst

    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(gnome_sort(unsorted))
| 217 |
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

SAMPLE_TEXT = "Hello world! cécé herlolip"

BertAbsConfig = namedtuple(
'BertAbsConfig',
[
'temp_dir',
'large',
'use_bert_emb',
'finetune_bert',
'encoder',
'share_emb',
'max_pos',
'enc_layers',
'enc_hidden_size',
'enc_heads',
'enc_ff_size',
'enc_dropout',
'dec_layers',
'dec_hidden_size',
'dec_heads',
'dec_ff_size',
'dec_dropout',
],
)
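# Load the authors' checkpoint, copy the encoder/decoder/generator weights into
# the transformers implementation, and verify both stacks produce identical outputs.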
def convert_bertabs_checkpoints(path_to_checkpoints, dump_path):
    # flag values below follow the reference conversion script
    config = BertAbsConfig(
        temp_dir=".",
        finetune_bert=False,
        large=False,
        share_emb=True,
        use_bert_emb=False,
        encoder="bert",
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
    )
    # the map_location lambda returns the storage unchanged, forcing a CPU load
    checkpoints = torch.load(path_to_checkpoints, lambda storage, loc: storage)
    original = AbsSummarizer(config, torch.device("cpu"), checkpoints)
    original.eval()

    new_model = BertAbsSummarizer(config, torch.device("cpu"))
    new_model.eval()

    # -------------------
    # Convert the weights
    # -------------------

    logging.info("convert the model")
    new_model.bert.load_state_dict(original.bert.state_dict())
    new_model.decoder.load_state_dict(original.decoder.state_dict())
    new_model.generator.load_state_dict(original.generator.state_dict())

    # ----------------------------------
    # Make sure the outputs are identical
    # ----------------------------------

    logging.info("Make sure that the models' outputs are identical")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

    # prepare the model inputs
    encoder_input_ids = tokenizer.encode("This is sample éàalj'-.")
    encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(encoder_input_ids)))
    encoder_input_ids = torch.tensor(encoder_input_ids).unsqueeze(0)
    decoder_input_ids = tokenizer.encode("This is sample 3 éàalj'-.")
    decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(decoder_input_ids)))
    decoder_input_ids = torch.tensor(decoder_input_ids).unsqueeze(0)

    # failsafe to make sure the weights reset does not affect the
    # loaded weights.
    assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight)) == 0

    # forward pass
    src = encoder_input_ids
    tgt = decoder_input_ids
    segs = token_type_ids = None
    clss = None
    mask_src = encoder_attention_mask = None
    mask_tgt = decoder_attention_mask = None
    mask_cls = None

    # The original model does not apply the generator layer immediately but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
    # We make sure that the outputs of the full stack are identical
    output_original_model = original(src, tgt, segs, clss, mask_src, mask_tgt, mask_cls)[0]
    output_original_generator = original.generator(output_original_model)

    output_converted_model = new_model(
        encoder_input_ids, decoder_input_ids, token_type_ids, encoder_attention_mask, decoder_attention_mask
    )[0]
    output_converted_generator = new_model.generator(output_converted_model)

    maximum_absolute_difference = torch.max(torch.abs(output_converted_model - output_original_model)).item()
    print("Maximum absolute difference between weights: {:.2f}".format(maximum_absolute_difference))
    maximum_absolute_difference = torch.max(torch.abs(output_converted_generator - output_original_generator)).item()
    print("Maximum absolute difference between weights: {:.2f}".format(maximum_absolute_difference))

    are_identical = torch.allclose(output_converted_model, output_original_model, atol=1e-3)
    if are_identical:
        logging.info("all weights are equal up to 1e-3")
    else:
        raise ValueError("the weights are different. The new model is likely different from the original one.")

    # The model has been saved with torch.save(model) and this is bound to the exact
    # directory structure. We save the state_dict instead.
    logging.info("saving the model's state dictionary")
    torch.save(
        new_model.state_dict(), "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin"
    )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--bertabs_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch dump.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the output PyTorch model.",
    )
    args = parser.parse_args()

    convert_bertabs_checkpoints(
        args.bertabs_checkpoint_path,
        args.pytorch_dump_folder_path,
    )
| 25 | 0 |
import math
import tensorflow as tf
from packaging import version
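# GELU(x) = x * Φ(x), where Φ is the standard normal CDF; the variants below
# differ only in how Φ is approximated (exact erf, tanh-based, or sigmoid-based).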
def _gelu(x):
    """Original GELU, using the exact normal CDF computed with erf."""
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))

    return x * cdf


def _gelu_new(x):
    """Smoother tanh-based approximation of GELU."""
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))

    return x * cdf


def mish(x):
    x = tf.convert_to_tensor(x)

    return x * tf.tanh(tf.math.softplus(x))


def gelu_fast(x):
    x = tf.convert_to_tensor(x)
    coeff1 = tf.cast(0.044715, x.dtype)
    coeff2 = tf.cast(0.7978845608, x.dtype)

    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))


def quick_gelu(x):
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)

    return x * tf.math.sigmoid(coeff * x)


def gelu_10(x):
    """GELU with outputs clipped to [-10, 10], useful for quantization."""
    return tf.clip_by_value(_gelu(x), -10, 10)


def glu(x, axis=-1):
    """Gated Linear Unit: split the input in two along `axis`, GLU(a, b) = a * sigmoid(b)."""
    a, b = tf.split(x, 2, axis=axis)

    return a * tf.math.sigmoid(b)


if version.parse(tf.version.VERSION) >= version.parse("2.4"):

    def approximate_gelu_wrap(x):
        return tf.keras.activations.gelu(x, approximate=True)

    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new


ACT2FN = {
    "gelu": gelu,
    "gelu_10": gelu_10,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}


def get_tf_activation(activation_string):
    if activation_string in ACT2FN:
        return ACT2FN[activation_string]
    else:
        raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}")
| 94 |
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
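# First- and second-order Shannon entropy of a text, H = -sum(p(x) * log2(p(x))),
# computed over single characters and over adjacent character pairs.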
def calculate_prob(text: str) -> None:
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())

    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.

    # print entropy
    print(f"{round(-1 * my_fir_sum):.1f}")

    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for ch0 in my_alphas:
        for ch1 in my_alphas:
            sequence = ch0 + ch1
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)

    # print second entropy
    print(f"{round(-1 * my_sec_sum):.1f}")

    # print the difference between them
    print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}")


def analyze_text(text: str) -> tuple[dict, dict]:
    """
    Convert text input into two dicts of counts:
    single-character frequencies and two-character (bigram) frequencies.
    """
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1

    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings
def main():
    import doctest

    doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
    main()
| 94 | 1 |
from __future__ import annotations
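# Greedy best-first search: always expand the frontier node with the smallest
# heuristic value (Manhattan distance to the goal); fast, but not guaranteed optimal.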
Path = list[tuple[int, int]]

grid = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right


class Node:
    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: float,
        parent: "Node | None",
    ):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        # Manhattan distance to the goal
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost


class GreedyBestFirst:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []

        self.reached = False

    def search(self) -> Path | None:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        # goal unreachable: fall back to the start position
        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]

            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_x,
                    self.target.pos_y,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    print("------")

    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()
    if path:
        for pos_y, pos_x in path:
            grid[pos_y][pos_x] = 2

        for elem in grid:
            print(elem)
| 430 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
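# End-to-end tests for datasets' Apache Beam-based builders, run with the
# in-process DirectRunner so no external Beam cluster is required.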
class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Dummy Beam dataset with flat string features."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Dummy Beam dataset with nested sequence features."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


def get_test_dummy_examples():
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]


def get_test_nested_examples():
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]
class BeamBuilderTest(TestCase):
    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet

        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            # both shards must exist on disk
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow"
                    )
                )
            )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow"
                    )
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})})
            )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
| 430 | 1 |
import d4rl  # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
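# Value-guided planning: a diffusion model proposes action trajectories and a
# learned value function nudges the samples toward high-return plans at each step.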
config = {
    "n_samples": 64,
    "horizon": 32,
    "num_inference_steps": 20,
    "n_guide_steps": 2,  # can set to 0 for faster sampling, does not use value network
    "scale_grad_by_std": True,
    "scale": 0.1,
    "eta": 0.0,
    "t_grad_cutoff": 2,
    "device": "cpu",
}


if __name__ == "__main__":
    env_name = "hopper-medium-v2"
    env = gym.make(env_name)

    pipeline = ValueGuidedRLPipeline.from_pretrained(
        "bglick13/hopper-medium-v2-value-function-hor32",
        env=env,
    )

    env.seed(0)
    obs = env.reset()
    total_reward = 0
    total_score = 0
    T = 1000
    rollout = [obs.copy()]
    try:
        for t in tqdm.tqdm(range(T)):
            # call the policy
            denorm_actions = pipeline(obs, planning_horizon=32)

            # execute action in environment
            next_observation, reward, terminal, _ = env.step(denorm_actions)
            score = env.get_normalized_score(total_reward)

            # update return
            total_reward += reward
            total_score += score
            print(
                f"Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"
                f" {total_score}"
            )

            # save observations for rendering
            rollout.append(next_observation.copy())

            obs = next_observation
    except KeyboardInterrupt:
        pass

    print(f"Total reward: {total_reward}")
| 57 |
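# Lucas-Lehmer test: for an odd prime p, M_p = 2**p - 1 is prime iff s_(p-2) == 0,
# where s_0 = 4 and s_(k+1) = (s_k**2 - 2) mod M_p.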
def lucas_lehmer_test(p: int) -> bool:
    """Return True if the Mersenne number 2**p - 1 is prime (p must be an odd prime, or 2)."""
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True

    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0


if __name__ == "__main__":
    print(lucas_lehmer_test(7))
    print(lucas_lehmer_test(11))
| 57 | 1 |
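# A perfect number equals the sum of its proper divisors, e.g. 6 = 1 + 2 + 3
# and 28 = 1 + 2 + 4 + 7 + 14.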
def perfect(number: int) -> bool:
    # proper divisors of n are at most n // 2, so only scan up to there
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number


if __name__ == "__main__":
    print("Program to check whether a number is a Perfect number or not...")
    number = int(input("Enter number: ").strip())
    print(f"{number} is {'' if perfect(number) else 'not '}a Perfect Number.")
| 105 |
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
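# Unconditional latent diffusion: DDIM denoising runs in the compressed latent
# space of a VQ-VAE, and the final latents are decoded back to pixel space.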
class LDMPipeline(DiffusionPipeline):
    def __init__(self, vqvae: VQModel, unet: UNet2DModel, scheduler: DDIMScheduler):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        # sample initial gaussian noise in the latent space of the VQ-VAE
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())

        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(self.scheduler.timesteps):
            latent_model_input = self.scheduler.scale_model_input(latents, t)
            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VAE
        image = self.vqvae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 294 | 0 |
import numpy as np
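# sigmoid(x) = 1 / (1 + e^(-x)); the sigmoid linear unit (SiLU / swish) is x * sigmoid(x).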
def sigmoid(vector: np.ndarray) -> np.ndarray:
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    return vector * sigmoid(vector)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 710 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}
class Data2VecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 239 | 0 |